// SPDX-License-Identifier: GPL-2.0
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 Nadia Yvette Chambers
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/debugfs.h>
#include <linux/tracefs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/mount.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/trace.h>
#include <linux/sched/clock.h>
#include <linux/sched/rt.h>

#include "trace_output.h"
/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
bool ring_buffer_expanded;
/*
 * We need to change this state when a selftest is running.
 * A selftest will lurk into the ring-buffer to count the
 * entries inserted during the selftest although some concurrent
 * insertions into the ring-buffer such as trace_printk could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;
/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;

/* Pipe tracepoints to printk */
struct trace_iterator *tracepoint_print_iter;
int tracepoint_printk;
static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static int
dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
	return 0;
}
/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_taskinfo_save);
/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;
cpumask_var_t __read_mostly	tracing_buffer_mask;
/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is default off, but you can enable it with either specifying
 * "ftrace_dump_on_oops" in the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */
enum ftrace_dump_mode ftrace_dump_on_oops;
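
/*
 * Illustrative sketch (based only on the description above, not code used
 * by this file): the dump mode can be chosen at boot or at run time, e.g.
 *
 *	ftrace_dump_on_oops		(kernel command line, dump all CPUs)
 *	ftrace_dump_on_oops=orig_cpu	(dump only the CPU that oopsed)
 *	echo 1 > /proc/sys/kernel/ftrace_dump_on_oops
 */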
/* When set, tracing will stop when a WARN*() is hit */
int __disable_trace_on_warning;
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
/* Map of enums to their values, for "eval_map" file */
struct trace_eval_map_head {
	struct module			*mod;
	unsigned long			length;
};

union trace_eval_map_item;

struct trace_eval_map_tail {
	/*
	 * "end" is first and points to NULL as it must be different
	 * than "mod" or "eval_string"
	 */
	union trace_eval_map_item	*next;
	const char			*end;	/* points to NULL */
};

static DEFINE_MUTEX(trace_eval_mutex);
/*
 * The trace_eval_maps are saved in an array with two extra elements,
 * one at the beginning, and one at the end. The beginning item contains
 * the count of the saved maps (head.length), and the module they
 * belong to if not built in (head.mod). The ending item contains a
 * pointer to the next array of saved eval_map items.
 */
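/*
 * Illustrative layout of one saved array, following the description above
 * (a sketch only, not a structure used by the code):
 *
 *	[ head: length, mod ][ map 0 ][ map 1 ] ... [ map N-1 ][ tail: next ]
 *
 * tail.next points at the head item of the next saved array, or NULL.
 */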
union trace_eval_map_item {
	struct trace_eval_map		map;
	struct trace_eval_map_head	head;
	struct trace_eval_map_tail	tail;
};

static union trace_eval_map_item *trace_eval_maps;
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int tracing_set_tracer(struct trace_array *tr, const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static bool allocate_snapshot;
static int __init set_cmdline_ftrace(char *str)
{
	strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = true;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init stop_trace_on_warning(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		__disable_trace_on_warning = 1;
	return 1;
}
__setup("traceoff_on_warning", stop_trace_on_warning);
static int __init boot_alloc_snapshot(char *str)
{
	allocate_snapshot = true;
	/* We also need the main ring buffer expanded */
	ring_buffer_expanded = true;
	return 1;
}
__setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;

static int __init set_trace_boot_options(char *str)
{
	strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_clock __initdata;

static int __init set_trace_boot_clock(char *str)
{
	strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
	trace_boot_clock = trace_boot_clock_buf;
	return 0;
}
__setup("trace_clock=", set_trace_boot_clock);
static int __init set_tracepoint_printk(char *str)
{
	if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
		tracepoint_printk = 1;
	return 0;
}
__setup("tp_printk", set_tracepoint_printk);
unsigned long long ns2usecs(u64 nsec)
/* trace_flags holds trace_options default values */
#define TRACE_DEFAULT_FLAGS						\
	(FUNCTION_DEFAULT_FLAGS |					\
	 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |			\
	 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO |		\
	 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |			\
	 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)

/* trace_options that are only supported by global_trace */
#define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK |			\
	       TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)

/* trace_flags that are default zero for instances */
#define ZEROED_TRACE_FLAGS \
	(TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
/*
 * The global_trace is the descriptor that holds the top-level tracing
 * buffers for the live tracing.
 */
static struct trace_array global_trace = {
	.trace_flags = TRACE_DEFAULT_FLAGS,
};

LIST_HEAD(ftrace_trace_arrays);
274 int trace_array_get(struct trace_array
*this_tr
)
276 struct trace_array
*tr
;
279 mutex_lock(&trace_types_lock
);
280 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
287 mutex_unlock(&trace_types_lock
);
292 static void __trace_array_put(struct trace_array
*this_tr
)
294 WARN_ON(!this_tr
->ref
);
298 void trace_array_put(struct trace_array
*this_tr
)
300 mutex_lock(&trace_types_lock
);
301 __trace_array_put(this_tr
);
302 mutex_unlock(&trace_types_lock
);
305 int call_filter_check_discard(struct trace_event_call
*call
, void *rec
,
306 struct ring_buffer
*buffer
,
307 struct ring_buffer_event
*event
)
309 if (unlikely(call
->flags
& TRACE_EVENT_FL_FILTERED
) &&
310 !filter_match_preds(call
->filter
, rec
)) {
311 __trace_event_discard_commit(buffer
, event
);
318 void trace_free_pid_list(struct trace_pid_list
*pid_list
)
320 vfree(pid_list
->pids
);
/**
 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
 * @filtered_pids: The list of pids to check
 * @search_pid: The PID to find in @filtered_pids
 *
 * Returns true if @search_pid is found in @filtered_pids, and false otherwise.
 */
332 trace_find_filtered_pid(struct trace_pid_list
*filtered_pids
, pid_t search_pid
)
335 * If pid_max changed after filtered_pids was created, we
336 * by default ignore all pids greater than the previous pid_max.
338 if (search_pid
>= filtered_pids
->pid_max
)
341 return test_bit(search_pid
, filtered_pids
->pids
);
/**
 * trace_ignore_this_task - should a task be ignored for tracing
 * @filtered_pids: The list of pids to check
 * @task: The task that should be ignored if not filtered
 *
 * Checks if @task should be traced or not from @filtered_pids.
 * Returns true if @task should *NOT* be traced.
 * Returns false if @task should be traced.
 */
bool
trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
	/*
	 * Return false, because if filtered_pids does not exist,
	 * all pids are good to trace.
	 */
	if (!filtered_pids)
		return false;

	return !trace_find_filtered_pid(filtered_pids, task->pid);
}
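
/*
 * Typical use (a sketch, not a call site in this file): event and function
 * hooks can bail out early when the current task is filtered away:
 *
 *	if (trace_ignore_this_task(pid_list, current))
 *		return;
 */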
/**
 * trace_filter_add_remove_task - Add or remove a task from a pid_list
 * @pid_list: The list to modify
 * @self: The current task for fork or NULL for exit
 * @task: The task to add or remove
 *
 * If adding a task, if @self is defined, the task is only added if @self
 * is also included in @pid_list. This happens on fork and tasks should
 * only be added when the parent is listed. If @self is NULL, then the
 * @task pid will be removed from the list, which would happen on exit
 * of a task.
 */
378 void trace_filter_add_remove_task(struct trace_pid_list
*pid_list
,
379 struct task_struct
*self
,
380 struct task_struct
*task
)
385 /* For forks, we only add if the forking task is listed */
387 if (!trace_find_filtered_pid(pid_list
, self
->pid
))
391 /* Sorry, but we don't support pid_max changing after setting */
392 if (task
->pid
>= pid_list
->pid_max
)
395 /* "self" is set for forks, and NULL for exits */
397 set_bit(task
->pid
, pid_list
->pids
);
399 clear_bit(task
->pid
, pid_list
->pids
);
403 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
404 * @pid_list: The pid list to show
405 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
406 * @pos: The position of the file
408 * This is used by the seq_file "next" operation to iterate the pids
409 * listed in a trace_pid_list structure.
411 * Returns the pid+1 as we want to display pid of zero, but NULL would
412 * stop the iteration.
414 void *trace_pid_next(struct trace_pid_list
*pid_list
, void *v
, loff_t
*pos
)
416 unsigned long pid
= (unsigned long)v
;
	/* pid already is +1 of the actual previous bit */
421 pid
= find_next_bit(pid_list
->pids
, pid_list
->pid_max
, pid
);
423 /* Return pid + 1 to allow zero to be represented */
424 if (pid
< pid_list
->pid_max
)
425 return (void *)(pid
+ 1);
431 * trace_pid_start - Used for seq_file to start reading pid lists
432 * @pid_list: The pid list to show
433 * @pos: The position of the file
435 * This is used by seq_file "start" operation to start the iteration
438 * Returns the pid+1 as we want to display pid of zero, but NULL would
439 * stop the iteration.
441 void *trace_pid_start(struct trace_pid_list
*pid_list
, loff_t
*pos
)
446 pid
= find_first_bit(pid_list
->pids
, pid_list
->pid_max
);
447 if (pid
>= pid_list
->pid_max
)
450 /* Return pid + 1 so that zero can be the exit value */
451 for (pid
++; pid
&& l
< *pos
;
452 pid
= (unsigned long)trace_pid_next(pid_list
, (void *)pid
, &l
))
/**
 * trace_pid_show - show the current pid in seq_file processing
 * @m: The seq_file structure to write into
 * @v: A void pointer of the pid (+1) value to display
 *
 * Can be directly used by seq_file operations to display the current
 * pid value.
 */
int trace_pid_show(struct seq_file *m, void *v)
{
	unsigned long pid = (unsigned long)v - 1;

	seq_printf(m, "%lu\n", pid);

	return 0;
}
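
/*
 * Sketch of how the pid helpers above are meant to be wired into a
 * seq_file; the ops structure and wrapper names are illustrative only:
 *
 *	static const struct seq_operations example_pid_sops = {
 *		.start	= example_pid_start,	(calls trace_pid_start())
 *		.next	= example_pid_next,	(calls trace_pid_next())
 *		.stop	= example_pid_stop,
 *		.show	= trace_pid_show,
 *	};
 */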
473 /* 128 should be much more than enough */
474 #define PID_BUF_SIZE 127
476 int trace_pid_write(struct trace_pid_list
*filtered_pids
,
477 struct trace_pid_list
**new_pid_list
,
478 const char __user
*ubuf
, size_t cnt
)
480 struct trace_pid_list
*pid_list
;
481 struct trace_parser parser
;
489 if (trace_parser_get_init(&parser
, PID_BUF_SIZE
+ 1))
	/*
	 * Always recreate a new array. The write is an all or nothing
	 * operation. Always create a new array when adding new pids by
	 * the user. If the operation fails, then the current list is
	 * not modified.
	 */
498 pid_list
= kmalloc(sizeof(*pid_list
), GFP_KERNEL
);
502 pid_list
->pid_max
= READ_ONCE(pid_max
);
504 /* Only truncating will shrink pid_max */
505 if (filtered_pids
&& filtered_pids
->pid_max
> pid_list
->pid_max
)
506 pid_list
->pid_max
= filtered_pids
->pid_max
;
508 pid_list
->pids
= vzalloc((pid_list
->pid_max
+ 7) >> 3);
509 if (!pid_list
->pids
) {
515 /* copy the current bits to the new max */
516 for_each_set_bit(pid
, filtered_pids
->pids
,
517 filtered_pids
->pid_max
) {
518 set_bit(pid
, pid_list
->pids
);
527 ret
= trace_get_user(&parser
, ubuf
, cnt
, &pos
);
528 if (ret
< 0 || !trace_parser_loaded(&parser
))
536 if (kstrtoul(parser
.buffer
, 0, &val
))
538 if (val
>= pid_list
->pid_max
)
543 set_bit(pid
, pid_list
->pids
);
546 trace_parser_clear(&parser
);
549 trace_parser_put(&parser
);
552 trace_free_pid_list(pid_list
);
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list
);
	*new_pid_list = pid_list;
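
/*
 * Illustrative use from user space (a sketch; the tracefs files that feed
 * this helper, such as set_event_pid, live in the code that calls it):
 *
 *	echo 123 456 > set_event_pid	(replaces the list with pids 123 and 456)
 *	echo > set_event_pid		(clears the list)
 */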
568 static u64
buffer_ftrace_now(struct trace_buffer
*buf
, int cpu
)
572 /* Early boot up does not have a buffer yet */
574 return trace_clock_local();
576 ts
= ring_buffer_time_stamp(buf
->buffer
, cpu
);
577 ring_buffer_normalize_time_stamp(buf
->buffer
, cpu
, &ts
);
582 u64
ftrace_now(int cpu
)
584 return buffer_ftrace_now(&global_trace
.trace_buffer
, cpu
);
588 * tracing_is_enabled - Show if global_trace has been disabled
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
596 int tracing_is_enabled(void)
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
604 return !global_trace
.buffer_disabled
;
/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If the dump on oops happens, it will be much appreciated
 * to not have to wait for all that output. Anyway this can be
 * boot time and run time configurable.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
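
/*
 * Example (a sketch based on the comment above): the default can be
 * overridden on the kernel command line, e.g. "trace_buf_size=1M" for one
 * megabyte per cpu, which is parsed by set_buf_size() further down.
 */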
/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
DEFINE_MUTEX(trace_types_lock);
/*
 * serialize the access of the ring buffer
 *
 * ring buffer serializes readers, but it is low level protection.
 * The validity of the events (which are returned by ring_buffer_peek() etc.)
 * is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other processes to
 * consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not reader page) in ring buffer, and this page will be rewritten
 *      by the events producer.
 *   B) The page of the consumed events may become a page for splice_read,
 *      and this page will be returned to system.
 *
 * These primitives allow multi process access to different cpu ring buffers
 * simultaneously.
 *
 * These primitives don't distinguish read-only and read-consume access.
 * Multiple read-only accesses are also serialized.
 */
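
/*
 * Reader-side sketch consistent with the rules above (illustrative only):
 *
 *	trace_access_lock(cpu);
 *	... consume or peek at events of that cpu's buffer ...
 *	trace_access_unlock(cpu);
 *
 * Passing RING_BUFFER_ALL_CPUS instead takes the access lock exclusively.
 */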
652 static DECLARE_RWSEM(all_cpu_access_lock
);
653 static DEFINE_PER_CPU(struct mutex
, cpu_access_lock
);
655 static inline void trace_access_lock(int cpu
)
657 if (cpu
== RING_BUFFER_ALL_CPUS
) {
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock
);
661 /* gain it for accessing a cpu ring buffer. */
663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
664 down_read(&all_cpu_access_lock
);
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock
, cpu
));
671 static inline void trace_access_unlock(int cpu
)
673 if (cpu
== RING_BUFFER_ALL_CPUS
) {
674 up_write(&all_cpu_access_lock
);
676 mutex_unlock(&per_cpu(cpu_access_lock
, cpu
));
677 up_read(&all_cpu_access_lock
);
681 static inline void trace_access_lock_init(void)
685 for_each_possible_cpu(cpu
)
686 mutex_init(&per_cpu(cpu_access_lock
, cpu
));
691 static DEFINE_MUTEX(access_lock
);
693 static inline void trace_access_lock(int cpu
)
696 mutex_lock(&access_lock
);
699 static inline void trace_access_unlock(int cpu
)
702 mutex_unlock(&access_lock
);
705 static inline void trace_access_lock_init(void)
711 #ifdef CONFIG_STACKTRACE
712 static void __ftrace_trace_stack(struct ring_buffer
*buffer
,
714 int skip
, int pc
, struct pt_regs
*regs
);
715 static inline void ftrace_trace_stack(struct trace_array
*tr
,
716 struct ring_buffer
*buffer
,
718 int skip
, int pc
, struct pt_regs
*regs
);
721 static inline void __ftrace_trace_stack(struct ring_buffer
*buffer
,
723 int skip
, int pc
, struct pt_regs
*regs
)
726 static inline void ftrace_trace_stack(struct trace_array
*tr
,
727 struct ring_buffer
*buffer
,
729 int skip
, int pc
, struct pt_regs
*regs
)
735 static __always_inline
void
736 trace_event_setup(struct ring_buffer_event
*event
,
737 int type
, unsigned long flags
, int pc
)
739 struct trace_entry
*ent
= ring_buffer_event_data(event
);
741 tracing_generic_entry_update(ent
, flags
, pc
);
745 static __always_inline
struct ring_buffer_event
*
746 __trace_buffer_lock_reserve(struct ring_buffer
*buffer
,
749 unsigned long flags
, int pc
)
751 struct ring_buffer_event
*event
;
753 event
= ring_buffer_lock_reserve(buffer
, len
);
755 trace_event_setup(event
, type
, flags
, pc
);
760 void tracer_tracing_on(struct trace_array
*tr
)
762 if (tr
->trace_buffer
.buffer
)
763 ring_buffer_record_on(tr
->trace_buffer
.buffer
);
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
772 tr
->buffer_disabled
= 0;
773 /* Make the flag seen by readers */
778 * tracing_on - enable tracing buffers
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
783 void tracing_on(void)
785 tracer_tracing_on(&global_trace
);
787 EXPORT_SYMBOL_GPL(tracing_on
);
790 static __always_inline
void
791 __buffer_unlock_commit(struct ring_buffer
*buffer
, struct ring_buffer_event
*event
)
793 __this_cpu_write(trace_taskinfo_save
, true);
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event
) == event
) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer
, event
->array
[0], &event
->array
[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt
);
802 ring_buffer_unlock_commit(buffer
, event
);
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
811 int __trace_puts(unsigned long ip
, const char *str
, int size
)
813 struct ring_buffer_event
*event
;
814 struct ring_buffer
*buffer
;
815 struct print_entry
*entry
;
816 unsigned long irq_flags
;
820 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
823 pc
= preempt_count();
825 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
828 alloc
= sizeof(*entry
) + size
+ 2; /* possible \n added */
830 local_save_flags(irq_flags
);
831 buffer
= global_trace
.trace_buffer
.buffer
;
832 event
= __trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, alloc
,
837 entry
= ring_buffer_event_data(event
);
840 memcpy(&entry
->buf
, str
, size
);
842 /* Add a newline if necessary */
843 if (entry
->buf
[size
- 1] != '\n') {
844 entry
->buf
[size
] = '\n';
845 entry
->buf
[size
+ 1] = '\0';
847 entry
->buf
[size
] = '\0';
849 __buffer_unlock_commit(buffer
, event
);
850 ftrace_trace_stack(&global_trace
, buffer
, irq_flags
, 4, pc
, NULL
);
854 EXPORT_SYMBOL_GPL(__trace_puts
);
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
861 int __trace_bputs(unsigned long ip
, const char *str
)
863 struct ring_buffer_event
*event
;
864 struct ring_buffer
*buffer
;
865 struct bputs_entry
*entry
;
866 unsigned long irq_flags
;
867 int size
= sizeof(struct bputs_entry
);
870 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
873 pc
= preempt_count();
875 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
878 local_save_flags(irq_flags
);
879 buffer
= global_trace
.trace_buffer
.buffer
;
880 event
= __trace_buffer_lock_reserve(buffer
, TRACE_BPUTS
, size
,
885 entry
= ring_buffer_event_data(event
);
889 __buffer_unlock_commit(buffer
, event
);
890 ftrace_trace_stack(&global_trace
, buffer
, irq_flags
, 4, pc
, NULL
);
894 EXPORT_SYMBOL_GPL(__trace_bputs
);
896 #ifdef CONFIG_TRACER_SNAPSHOT
897 void tracing_snapshot_instance(struct trace_array
*tr
)
899 struct tracer
*tracer
= tr
->current_trace
;
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
908 if (!tr
->allocated_snapshot
) {
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer
->use_max_tr
) {
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
922 local_irq_save(flags
);
923 update_max_tr(tr
, current
, smp_processor_id());
924 local_irq_restore(flags
);
928 * tracing_snapshot - take a snapshot of the current buffer.
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
941 void tracing_snapshot(void)
943 struct trace_array
*tr
= &global_trace
;
	tracing_snapshot_instance(tr);
}
EXPORT_SYMBOL_GPL(tracing_snapshot);
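
/*
 * Sketch of the intended call pattern (the triggering condition is
 * hypothetical): allocate once in a context that may sleep, then snapshot
 * from wherever the interesting event is detected:
 *
 *	tracing_alloc_snapshot();	(or: echo 1 > .../tracing/snapshot)
 *	...
 *	if (saw_interesting_condition)
 *		tracing_snapshot();
 */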
949 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
950 struct trace_buffer
*size_buf
, int cpu_id
);
951 static void set_buffer_entries(struct trace_buffer
*buf
, unsigned long val
);
953 int tracing_alloc_snapshot_instance(struct trace_array
*tr
)
957 if (!tr
->allocated_snapshot
) {
959 /* allocate spare buffer */
960 ret
= resize_buffer_duplicate_size(&tr
->max_buffer
,
961 &tr
->trace_buffer
, RING_BUFFER_ALL_CPUS
);
965 tr
->allocated_snapshot
= true;
971 static void free_snapshot(struct trace_array
*tr
)
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
978 ring_buffer_resize(tr
->max_buffer
.buffer
, 1, RING_BUFFER_ALL_CPUS
);
979 set_buffer_entries(&tr
->max_buffer
, 1);
980 tracing_reset_online_cpus(&tr
->max_buffer
);
981 tr
->allocated_snapshot
= false;
985 * tracing_alloc_snapshot - allocate snapshot buffer.
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
994 int tracing_alloc_snapshot(void)
996 struct trace_array
*tr
= &global_trace
;
999 ret
= tracing_alloc_snapshot_instance(tr
);
1004 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
1007 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1009 * This is similar to tracing_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1017 void tracing_snapshot_alloc(void)
1021 ret
= tracing_alloc_snapshot();
1027 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
1029 void tracing_snapshot(void)
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1033 EXPORT_SYMBOL_GPL(tracing_snapshot
);
1034 int tracing_alloc_snapshot(void)
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1039 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot
);
1040 void tracing_snapshot_alloc(void)
1045 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
);
1046 #endif /* CONFIG_TRACER_SNAPSHOT */
1048 void tracer_tracing_off(struct trace_array
*tr
)
1050 if (tr
->trace_buffer
.buffer
)
1051 ring_buffer_record_off(tr
->trace_buffer
.buffer
);
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races of where it gets disabled but we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1060 tr
->buffer_disabled
= 1;
1061 /* Make the flag seen by readers */
1066 * tracing_off - turn off tracing buffers
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1073 void tracing_off(void)
	tracer_tracing_off(&global_trace);
}
EXPORT_SYMBOL_GPL(tracing_off);
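
/*
 * Sketch: kernel code can bracket a region of interest so that only that
 * window remains in the ring buffer (the surrounding code is hypothetical):
 *
 *	tracing_on();
 *	do_interesting_work();
 *	tracing_off();
 */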
1079 void disable_trace_on_warning(void)
1081 if (__disable_trace_on_warning
)
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1089 * Shows real state of the ring buffer if it is enabled or not.
1091 bool tracer_tracing_is_on(struct trace_array
*tr
)
1093 if (tr
->trace_buffer
.buffer
)
1094 return ring_buffer_record_is_on(tr
->trace_buffer
.buffer
);
1095 return !tr
->buffer_disabled
;
1099 * tracing_is_on - show state of ring buffers enabled
1101 int tracing_is_on(void)
1103 return tracer_tracing_is_on(&global_trace
);
1105 EXPORT_SYMBOL_GPL(tracing_is_on
);
1107 static int __init
set_buf_size(char *str
)
1109 unsigned long buf_size
;
1113 buf_size
= memparse(str
, &str
);
1114 /* nr_entries can not be zero */
1117 trace_buf_size
= buf_size
;
1120 __setup("trace_buf_size=", set_buf_size
);
1122 static int __init
set_tracing_thresh(char *str
)
1124 unsigned long threshold
;
1129 ret
= kstrtoul(str
, 0, &threshold
);
1132 tracing_thresh
= threshold
* 1000;
1135 __setup("tracing_thresh=", set_tracing_thresh
);
1137 unsigned long nsecs_to_usecs(unsigned long nsecs
)
1139 return nsecs
/ 1000;
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1146 * of strings in the order that the evals (enum) were defined.
/* These must match the bit positions in trace_iterator_flags */
1152 static const char *trace_options
[] = {
1160 int in_ns
; /* is this clock in nanoseconds? */
1161 } trace_clocks
[] = {
1162 { trace_clock_local
, "local", 1 },
1163 { trace_clock_global
, "global", 1 },
1164 { trace_clock_counter
, "counter", 0 },
1165 { trace_clock_jiffies
, "uptime", 0 },
1166 { trace_clock
, "perf", 1 },
1167 { ktime_get_mono_fast_ns
, "mono", 1 },
1168 { ktime_get_raw_fast_ns
, "mono_raw", 1 },
1169 { ktime_get_boot_fast_ns
, "boot", 1 },
1173 bool trace_clock_in_ns(struct trace_array
*tr
)
1175 if (trace_clocks
[tr
->clock_id
].in_ns
)
1182 * trace_parser_get_init - gets the buffer for trace parser
1184 int trace_parser_get_init(struct trace_parser
*parser
, int size
)
1186 memset(parser
, 0, sizeof(*parser
));
1188 parser
->buffer
= kmalloc(size
, GFP_KERNEL
);
1189 if (!parser
->buffer
)
1192 parser
->size
= size
;
1197 * trace_parser_put - frees the buffer for trace parser
1199 void trace_parser_put(struct trace_parser
*parser
)
	kfree(parser->buffer);
	parser->buffer = NULL;
}
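
/*
 * Sketch of the parser life cycle used by write handlers in this file
 * (the buffer size and the token consumer are illustrative only):
 *
 *	struct trace_parser parser;
 *
 *	if (trace_parser_get_init(&parser, 128))
 *		return -ENOMEM;
 *	while (cnt > 0) {
 *		ret = trace_get_user(&parser, ubuf, cnt, &pos);
 *		if (ret < 0 || !trace_parser_loaded(&parser))
 *			break;
 *		handle_token(parser.buffer);	(hypothetical consumer)
 *		... advance ubuf and cnt by ret ...
 *	}
 *	trace_parser_put(&parser);
 */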
1206 * trace_get_user - reads the user input string separated by space
1207 * (matched by isspace(ch))
1209 * For each string found the 'struct trace_parser' is updated,
1210 * and the function returns.
1212 * Returns number of bytes read.
1214 * See kernel/trace/trace.h for 'struct trace_parser' details.
1216 int trace_get_user(struct trace_parser
*parser
, const char __user
*ubuf
,
1217 size_t cnt
, loff_t
*ppos
)
1224 trace_parser_clear(parser
);
1226 ret
= get_user(ch
, ubuf
++);
1234 * The parser is not finished with the last write,
1235 * continue reading the user input without skipping spaces.
1237 if (!parser
->cont
) {
1238 /* skip white space */
1239 while (cnt
&& isspace(ch
)) {
1240 ret
= get_user(ch
, ubuf
++);
1249 /* only spaces were written */
1250 if (isspace(ch
) || !ch
) {
1257 /* read the non-space input */
1258 while (cnt
&& !isspace(ch
) && ch
) {
1259 if (parser
->idx
< parser
->size
- 1)
1260 parser
->buffer
[parser
->idx
++] = ch
;
1265 ret
= get_user(ch
, ubuf
++);
1272 /* We either got finished input or we have to wait for another call. */
1273 if (isspace(ch
) || !ch
) {
1274 parser
->buffer
[parser
->idx
] = 0;
1275 parser
->cont
= false;
1276 } else if (parser
->idx
< parser
->size
- 1) {
1277 parser
->cont
= true;
1278 parser
->buffer
[parser
->idx
++] = ch
;
1279 /* Make sure the parsed string always terminates with '\0'. */
1280 parser
->buffer
[parser
->idx
] = 0;
1293 /* TODO add a seq_buf_to_buffer() */
1294 static ssize_t
trace_seq_to_buffer(struct trace_seq
*s
, void *buf
, size_t cnt
)
1298 if (trace_seq_used(s
) <= s
->seq
.readpos
)
1301 len
= trace_seq_used(s
) - s
->seq
.readpos
;
1304 memcpy(buf
, s
->buffer
+ s
->seq
.readpos
, cnt
);
1306 s
->seq
.readpos
+= cnt
;
1310 unsigned long __read_mostly tracing_thresh
;
1312 #ifdef CONFIG_TRACER_MAX_TRACE
1314 * Copy the new maximum trace into the separate maximum-trace
1315 * structure. (this way the maximum trace is permanently saved,
1316 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1319 __update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1321 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
1322 struct trace_buffer
*max_buf
= &tr
->max_buffer
;
1323 struct trace_array_cpu
*data
= per_cpu_ptr(trace_buf
->data
, cpu
);
1324 struct trace_array_cpu
*max_data
= per_cpu_ptr(max_buf
->data
, cpu
);
1327 max_buf
->time_start
= data
->preempt_timestamp
;
1329 max_data
->saved_latency
= tr
->max_latency
;
1330 max_data
->critical_start
= data
->critical_start
;
1331 max_data
->critical_end
= data
->critical_end
;
1333 memcpy(max_data
->comm
, tsk
->comm
, TASK_COMM_LEN
);
1334 max_data
->pid
= tsk
->pid
;
1336 * If tsk == current, then use current_uid(), as that does not use
1337 * RCU. The irq tracer can be called out of RCU scope.
1340 max_data
->uid
= current_uid();
1342 max_data
->uid
= task_uid(tsk
);
1344 max_data
->nice
= tsk
->static_prio
- 20 - MAX_RT_PRIO
;
1345 max_data
->policy
= tsk
->policy
;
1346 max_data
->rt_priority
= tsk
->rt_priority
;
1348 /* record this tasks comm */
1349 tracing_record_cmdline(tsk
);
1353 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1355 * @tsk: the task with the latency
1356 * @cpu: The cpu that initiated the trace.
1358 * Flip the buffers between the @tr and the max_tr and record information
1359 * about which task was the cause of this latency.
1362 update_max_tr(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1367 WARN_ON_ONCE(!irqs_disabled());
1369 if (!tr
->allocated_snapshot
) {
1370 /* Only the nop tracer should hit this when disabling */
1371 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1375 arch_spin_lock(&tr
->max_lock
);
1377 /* Inherit the recordable setting from trace_buffer */
1378 if (ring_buffer_record_is_set_on(tr
->trace_buffer
.buffer
))
1379 ring_buffer_record_on(tr
->max_buffer
.buffer
);
1381 ring_buffer_record_off(tr
->max_buffer
.buffer
);
1383 swap(tr
->trace_buffer
.buffer
, tr
->max_buffer
.buffer
);
1385 __update_max_tr(tr
, tsk
, cpu
);
1386 arch_spin_unlock(&tr
->max_lock
);
1390 * update_max_tr_single - only copy one trace over, and reset the rest
1392 * @tsk - task with the latency
1393 * @cpu - the cpu of the buffer to copy.
1395 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1398 update_max_tr_single(struct trace_array
*tr
, struct task_struct
*tsk
, int cpu
)
1405 WARN_ON_ONCE(!irqs_disabled());
1406 if (!tr
->allocated_snapshot
) {
1407 /* Only the nop tracer should hit this when disabling */
1408 WARN_ON_ONCE(tr
->current_trace
!= &nop_trace
);
1412 arch_spin_lock(&tr
->max_lock
);
1414 ret
= ring_buffer_swap_cpu(tr
->max_buffer
.buffer
, tr
->trace_buffer
.buffer
, cpu
);
1416 if (ret
== -EBUSY
) {
1418 * We failed to swap the buffer due to a commit taking
1419 * place on this CPU. We fail to record, but we reset
1420 * the max trace buffer (no one writes directly to it)
1421 * and flag that it failed.
1423 trace_array_printk_buf(tr
->max_buffer
.buffer
, _THIS_IP_
,
1424 "Failed to swap buffers due to commit in progress\n");
1427 WARN_ON_ONCE(ret
&& ret
!= -EAGAIN
&& ret
!= -EBUSY
);
1429 __update_max_tr(tr
, tsk
, cpu
);
1430 arch_spin_unlock(&tr
->max_lock
);
1432 #endif /* CONFIG_TRACER_MAX_TRACE */
1434 static int wait_on_pipe(struct trace_iterator
*iter
, int full
)
1436 /* Iterators are static, they should be filled or empty */
1437 if (trace_buffer_iter(iter
, iter
->cpu_file
))
1440 return ring_buffer_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
1444 #ifdef CONFIG_FTRACE_STARTUP_TEST
1445 static bool selftests_can_run
;
1447 struct trace_selftests
{
1448 struct list_head list
;
1449 struct tracer
*type
;
1452 static LIST_HEAD(postponed_selftests
);
1454 static int save_selftest(struct tracer
*type
)
1456 struct trace_selftests
*selftest
;
1458 selftest
= kmalloc(sizeof(*selftest
), GFP_KERNEL
);
1462 selftest
->type
= type
;
1463 list_add(&selftest
->list
, &postponed_selftests
);
1467 static int run_tracer_selftest(struct tracer
*type
)
1469 struct trace_array
*tr
= &global_trace
;
1470 struct tracer
*saved_tracer
= tr
->current_trace
;
1473 if (!type
->selftest
|| tracing_selftest_disabled
)
1477 * If a tracer registers early in boot up (before scheduling is
1478 * initialized and such), then do not run its selftests yet.
1479 * Instead, run it a little later in the boot process.
1481 if (!selftests_can_run
)
1482 return save_selftest(type
);
1485 * Run a selftest on this tracer.
1486 * Here we reset the trace buffer, and set the current
1487 * tracer to be this tracer. The tracer can then run some
1488 * internal tracing to verify that everything is in order.
1489 * If we fail, we do not register this tracer.
1491 tracing_reset_online_cpus(&tr
->trace_buffer
);
1493 tr
->current_trace
= type
;
1495 #ifdef CONFIG_TRACER_MAX_TRACE
1496 if (type
->use_max_tr
) {
1497 /* If we expanded the buffers, make sure the max is expanded too */
1498 if (ring_buffer_expanded
)
1499 ring_buffer_resize(tr
->max_buffer
.buffer
, trace_buf_size
,
1500 RING_BUFFER_ALL_CPUS
);
1501 tr
->allocated_snapshot
= true;
1505 /* the test is responsible for initializing and enabling */
1506 pr_info("Testing tracer %s: ", type
->name
);
1507 ret
= type
->selftest(type
, tr
);
1508 /* the test is responsible for resetting too */
1509 tr
->current_trace
= saved_tracer
;
1511 printk(KERN_CONT
"FAILED!\n");
1512 /* Add the warning after printing 'FAILED' */
1516 /* Only reset on passing, to avoid touching corrupted buffers */
1517 tracing_reset_online_cpus(&tr
->trace_buffer
);
1519 #ifdef CONFIG_TRACER_MAX_TRACE
1520 if (type
->use_max_tr
) {
1521 tr
->allocated_snapshot
= false;
1523 /* Shrink the max buffer again */
1524 if (ring_buffer_expanded
)
1525 ring_buffer_resize(tr
->max_buffer
.buffer
, 1,
1526 RING_BUFFER_ALL_CPUS
);
1530 printk(KERN_CONT
"PASSED\n");
1534 static __init
int init_trace_selftests(void)
1536 struct trace_selftests
*p
, *n
;
1537 struct tracer
*t
, **last
;
1540 selftests_can_run
= true;
1542 mutex_lock(&trace_types_lock
);
1544 if (list_empty(&postponed_selftests
))
1547 pr_info("Running postponed tracer tests:\n");
1549 list_for_each_entry_safe(p
, n
, &postponed_selftests
, list
) {
1550 ret
= run_tracer_selftest(p
->type
);
1551 /* If the test fails, then warn and remove from available_tracers */
1553 WARN(1, "tracer: %s failed selftest, disabling\n",
1555 last
= &trace_types
;
1556 for (t
= trace_types
; t
; t
= t
->next
) {
1569 mutex_unlock(&trace_types_lock
);
1573 core_initcall(init_trace_selftests
);
1575 static inline int run_tracer_selftest(struct tracer
*type
)
1579 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1581 static void add_tracer_options(struct trace_array
*tr
, struct tracer
*t
);
1583 static void __init
apply_trace_boot_options(void);
1586 * register_tracer - register a tracer with the ftrace system.
1587 * @type - the plugin for the tracer
1589 * Register a new plugin tracer.
1591 int __init
register_tracer(struct tracer
*type
)
1597 pr_info("Tracer must have a name\n");
1601 if (strlen(type
->name
) >= MAX_TRACER_SIZE
) {
1602 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE
);
1606 mutex_lock(&trace_types_lock
);
1608 tracing_selftest_running
= true;
1610 for (t
= trace_types
; t
; t
= t
->next
) {
1611 if (strcmp(type
->name
, t
->name
) == 0) {
1613 pr_info("Tracer %s already registered\n",
1620 if (!type
->set_flag
)
1621 type
->set_flag
= &dummy_set_flag
;
	/* allocate a dummy tracer_flags */
1624 type
->flags
= kmalloc(sizeof(*type
->flags
), GFP_KERNEL
);
1629 type
->flags
->val
= 0;
1630 type
->flags
->opts
= dummy_tracer_opt
;
1632 if (!type
->flags
->opts
)
1633 type
->flags
->opts
= dummy_tracer_opt
;
1635 /* store the tracer for __set_tracer_option */
1636 type
->flags
->trace
= type
;
1638 ret
= run_tracer_selftest(type
);
1642 type
->next
= trace_types
;
1644 add_tracer_options(&global_trace
, type
);
1647 tracing_selftest_running
= false;
1648 mutex_unlock(&trace_types_lock
);
1650 if (ret
|| !default_bootup_tracer
)
1653 if (strncmp(default_bootup_tracer
, type
->name
, MAX_TRACER_SIZE
))
1656 printk(KERN_INFO
"Starting tracer '%s'\n", type
->name
);
1657 /* Do we want this tracer to start on bootup? */
1658 tracing_set_tracer(&global_trace
, type
->name
);
1659 default_bootup_tracer
= NULL
;
1661 apply_trace_boot_options();
1663 /* disable other selftests, since this will break it. */
1664 tracing_selftest_disabled
= true;
1665 #ifdef CONFIG_FTRACE_STARTUP_TEST
1666 printk(KERN_INFO
"Disabling FTRACE selftests due to running tracer '%s'\n",
1674 void tracing_reset(struct trace_buffer
*buf
, int cpu
)
1676 struct ring_buffer
*buffer
= buf
->buffer
;
1681 ring_buffer_record_disable(buffer
);
1683 /* Make sure all commits have finished */
1684 synchronize_sched();
1685 ring_buffer_reset_cpu(buffer
, cpu
);
1687 ring_buffer_record_enable(buffer
);
1690 void tracing_reset_online_cpus(struct trace_buffer
*buf
)
1692 struct ring_buffer
*buffer
= buf
->buffer
;
1698 ring_buffer_record_disable(buffer
);
1700 /* Make sure all commits have finished */
1701 synchronize_sched();
1703 buf
->time_start
= buffer_ftrace_now(buf
, buf
->cpu
);
1705 for_each_online_cpu(cpu
)
1706 ring_buffer_reset_cpu(buffer
, cpu
);
1708 ring_buffer_record_enable(buffer
);
1711 /* Must have trace_types_lock held */
1712 void tracing_reset_all_online_cpus(void)
1714 struct trace_array
*tr
;
1716 list_for_each_entry(tr
, &ftrace_trace_arrays
, list
) {
1717 if (!tr
->clear_trace
)
1719 tr
->clear_trace
= false;
1720 tracing_reset_online_cpus(&tr
->trace_buffer
);
1721 #ifdef CONFIG_TRACER_MAX_TRACE
1722 tracing_reset_online_cpus(&tr
->max_buffer
);
1727 static int *tgid_map
;
1729 #define SAVED_CMDLINES_DEFAULT 128
1730 #define NO_CMDLINE_MAP UINT_MAX
1731 static arch_spinlock_t trace_cmdline_lock
= __ARCH_SPIN_LOCK_UNLOCKED
;
1732 struct saved_cmdlines_buffer
{
1733 unsigned map_pid_to_cmdline
[PID_MAX_DEFAULT
+1];
1734 unsigned *map_cmdline_to_pid
;
1735 unsigned cmdline_num
;
1737 char *saved_cmdlines
;
1739 static struct saved_cmdlines_buffer
*savedcmd
;
1741 /* temporary disable recording */
1742 static atomic_t trace_record_taskinfo_disabled __read_mostly
;
1744 static inline char *get_saved_cmdlines(int idx
)
1746 return &savedcmd
->saved_cmdlines
[idx
* TASK_COMM_LEN
];
1749 static inline void set_cmdline(int idx
, const char *cmdline
)
1751 memcpy(get_saved_cmdlines(idx
), cmdline
, TASK_COMM_LEN
);
1754 static int allocate_cmdlines_buffer(unsigned int val
,
1755 struct saved_cmdlines_buffer
*s
)
1757 s
->map_cmdline_to_pid
= kmalloc_array(val
,
1758 sizeof(*s
->map_cmdline_to_pid
),
1760 if (!s
->map_cmdline_to_pid
)
1763 s
->saved_cmdlines
= kmalloc_array(TASK_COMM_LEN
, val
, GFP_KERNEL
);
1764 if (!s
->saved_cmdlines
) {
1765 kfree(s
->map_cmdline_to_pid
);
1770 s
->cmdline_num
= val
;
1771 memset(&s
->map_pid_to_cmdline
, NO_CMDLINE_MAP
,
1772 sizeof(s
->map_pid_to_cmdline
));
1773 memset(s
->map_cmdline_to_pid
, NO_CMDLINE_MAP
,
1774 val
* sizeof(*s
->map_cmdline_to_pid
));
1779 static int trace_create_savedcmd(void)
1783 savedcmd
= kmalloc(sizeof(*savedcmd
), GFP_KERNEL
);
1787 ret
= allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT
, savedcmd
);
1797 int is_tracing_stopped(void)
1799 return global_trace
.stop_count
;
1803 * tracing_start - quick start of the tracer
1805 * If tracing is enabled but was stopped by tracing_stop,
1806 * this will start the tracer back up.
1808 void tracing_start(void)
1810 struct ring_buffer
*buffer
;
1811 unsigned long flags
;
1813 if (tracing_disabled
)
1816 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1817 if (--global_trace
.stop_count
) {
1818 if (global_trace
.stop_count
< 0) {
1819 /* Someone screwed up their debugging */
1821 global_trace
.stop_count
= 0;
1826 /* Prevent the buffers from switching */
1827 arch_spin_lock(&global_trace
.max_lock
);
1829 buffer
= global_trace
.trace_buffer
.buffer
;
1831 ring_buffer_record_enable(buffer
);
1833 #ifdef CONFIG_TRACER_MAX_TRACE
1834 buffer
= global_trace
.max_buffer
.buffer
;
1836 ring_buffer_record_enable(buffer
);
1839 arch_spin_unlock(&global_trace
.max_lock
);
1842 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1845 static void tracing_start_tr(struct trace_array
*tr
)
1847 struct ring_buffer
*buffer
;
1848 unsigned long flags
;
1850 if (tracing_disabled
)
1853 /* If global, we need to also start the max tracer */
1854 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1855 return tracing_start();
1857 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1859 if (--tr
->stop_count
) {
1860 if (tr
->stop_count
< 0) {
1861 /* Someone screwed up their debugging */
1868 buffer
= tr
->trace_buffer
.buffer
;
1870 ring_buffer_record_enable(buffer
);
1873 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1877 * tracing_stop - quick stop of the tracer
1879 * Light weight way to stop tracing. Use in conjunction with
1882 void tracing_stop(void)
1884 struct ring_buffer
*buffer
;
1885 unsigned long flags
;
1887 raw_spin_lock_irqsave(&global_trace
.start_lock
, flags
);
1888 if (global_trace
.stop_count
++)
1891 /* Prevent the buffers from switching */
1892 arch_spin_lock(&global_trace
.max_lock
);
1894 buffer
= global_trace
.trace_buffer
.buffer
;
1896 ring_buffer_record_disable(buffer
);
1898 #ifdef CONFIG_TRACER_MAX_TRACE
1899 buffer
= global_trace
.max_buffer
.buffer
;
1901 ring_buffer_record_disable(buffer
);
1904 arch_spin_unlock(&global_trace
.max_lock
);
1907 raw_spin_unlock_irqrestore(&global_trace
.start_lock
, flags
);
1910 static void tracing_stop_tr(struct trace_array
*tr
)
1912 struct ring_buffer
*buffer
;
1913 unsigned long flags
;
1915 /* If global, we need to also stop the max tracer */
1916 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
1917 return tracing_stop();
1919 raw_spin_lock_irqsave(&tr
->start_lock
, flags
);
1920 if (tr
->stop_count
++)
1923 buffer
= tr
->trace_buffer
.buffer
;
1925 ring_buffer_record_disable(buffer
);
1928 raw_spin_unlock_irqrestore(&tr
->start_lock
, flags
);
1931 static int trace_save_cmdline(struct task_struct
*tsk
)
1935 /* treat recording of idle task as a success */
1939 if (unlikely(tsk
->pid
> PID_MAX_DEFAULT
))
1943 * It's not the end of the world if we don't get
1944 * the lock, but we also don't want to spin
1945 * nor do we want to disable interrupts,
1946 * so if we miss here, then better luck next time.
1948 if (!arch_spin_trylock(&trace_cmdline_lock
))
1951 idx
= savedcmd
->map_pid_to_cmdline
[tsk
->pid
];
1952 if (idx
== NO_CMDLINE_MAP
) {
1953 idx
= (savedcmd
->cmdline_idx
+ 1) % savedcmd
->cmdline_num
;
1956 * Check whether the cmdline buffer at idx has a pid
1957 * mapped. We are going to overwrite that entry so we
1958 * need to clear the map_pid_to_cmdline. Otherwise we
1959 * would read the new comm for the old pid.
1961 pid
= savedcmd
->map_cmdline_to_pid
[idx
];
1962 if (pid
!= NO_CMDLINE_MAP
)
1963 savedcmd
->map_pid_to_cmdline
[pid
] = NO_CMDLINE_MAP
;
1965 savedcmd
->map_cmdline_to_pid
[idx
] = tsk
->pid
;
1966 savedcmd
->map_pid_to_cmdline
[tsk
->pid
] = idx
;
1968 savedcmd
->cmdline_idx
= idx
;
1971 set_cmdline(idx
, tsk
->comm
);
1973 arch_spin_unlock(&trace_cmdline_lock
);
1978 static void __trace_find_cmdline(int pid
, char comm
[])
1983 strcpy(comm
, "<idle>");
1987 if (WARN_ON_ONCE(pid
< 0)) {
1988 strcpy(comm
, "<XXX>");
1992 if (pid
> PID_MAX_DEFAULT
) {
1993 strcpy(comm
, "<...>");
1997 map
= savedcmd
->map_pid_to_cmdline
[pid
];
1998 if (map
!= NO_CMDLINE_MAP
)
1999 strlcpy(comm
, get_saved_cmdlines(map
), TASK_COMM_LEN
);
2001 strcpy(comm
, "<...>");
2004 void trace_find_cmdline(int pid
, char comm
[])
2007 arch_spin_lock(&trace_cmdline_lock
);
2009 __trace_find_cmdline(pid
, comm
);
2011 arch_spin_unlock(&trace_cmdline_lock
);
2015 int trace_find_tgid(int pid
)
2017 if (unlikely(!tgid_map
|| !pid
|| pid
> PID_MAX_DEFAULT
))
2020 return tgid_map
[pid
];
2023 static int trace_save_tgid(struct task_struct
*tsk
)
2025 /* treat recording of idle task as a success */
2029 if (unlikely(!tgid_map
|| tsk
->pid
> PID_MAX_DEFAULT
))
2032 tgid_map
[tsk
->pid
] = tsk
->tgid
;
2036 static bool tracing_record_taskinfo_skip(int flags
)
2038 if (unlikely(!(flags
& (TRACE_RECORD_CMDLINE
| TRACE_RECORD_TGID
))))
2040 if (atomic_read(&trace_record_taskinfo_disabled
) || !tracing_is_on())
2042 if (!__this_cpu_read(trace_taskinfo_save
))
2048 * tracing_record_taskinfo - record the task info of a task
2050 * @task - task to record
2051 * @flags - TRACE_RECORD_CMDLINE for recording comm
2052 * - TRACE_RECORD_TGID for recording tgid
2054 void tracing_record_taskinfo(struct task_struct
*task
, int flags
)
2058 if (tracing_record_taskinfo_skip(flags
))
2062 * Record as much task information as possible. If some fail, continue
2063 * to try to record the others.
2065 done
= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(task
);
2066 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(task
);
2068 /* If recording any information failed, retry again soon. */
2072 __this_cpu_write(trace_taskinfo_save
, false);
2076 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2078 * @prev - previous task during sched_switch
2079 * @next - next task during sched_switch
2080 * @flags - TRACE_RECORD_CMDLINE for recording comm
2081 * TRACE_RECORD_TGID for recording tgid
2083 void tracing_record_taskinfo_sched_switch(struct task_struct
*prev
,
2084 struct task_struct
*next
, int flags
)
2088 if (tracing_record_taskinfo_skip(flags
))
2092 * Record as much task information as possible. If some fail, continue
2093 * to try to record the others.
2095 done
= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(prev
);
2096 done
&= !(flags
& TRACE_RECORD_CMDLINE
) || trace_save_cmdline(next
);
2097 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(prev
);
2098 done
&= !(flags
& TRACE_RECORD_TGID
) || trace_save_tgid(next
);
2100 /* If recording any information failed, retry again soon. */
2104 __this_cpu_write(trace_taskinfo_save
, false);
2107 /* Helpers to record a specific task information */
2108 void tracing_record_cmdline(struct task_struct
*task
)
2110 tracing_record_taskinfo(task
, TRACE_RECORD_CMDLINE
);
2113 void tracing_record_tgid(struct task_struct
*task
)
2115 tracing_record_taskinfo(task
, TRACE_RECORD_TGID
);
2119 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2120 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2121 * simplifies those functions and keeps them in sync.
enum print_line_t trace_handle_return(struct trace_seq *s)
{
	return trace_seq_has_overflowed(s) ?
		TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
EXPORT_SYMBOL_GPL(trace_handle_return);
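
/*
 * Sketch of how event output callbacks use the helper above (the event
 * name and field are hypothetical):
 *
 *	static enum print_line_t my_event_output(struct trace_iterator *iter, ...)
 *	{
 *		trace_seq_printf(&iter->seq, "value=%d\n", my_field);
 *		return trace_handle_return(&iter->seq);
 *	}
 */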
2131 tracing_generic_entry_update(struct trace_entry
*entry
, unsigned long flags
,
2134 struct task_struct
*tsk
= current
;
2136 entry
->preempt_count
= pc
& 0xff;
2137 entry
->pid
= (tsk
) ? tsk
->pid
: 0;
2139 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2140 (irqs_disabled_flags(flags
) ? TRACE_FLAG_IRQS_OFF
: 0) |
2142 TRACE_FLAG_IRQS_NOSUPPORT
|
2144 ((pc
& NMI_MASK
) ? TRACE_FLAG_NMI
: 0) |
2145 ((pc
& HARDIRQ_MASK
) ? TRACE_FLAG_HARDIRQ
: 0) |
2146 ((pc
& SOFTIRQ_OFFSET
) ? TRACE_FLAG_SOFTIRQ
: 0) |
2147 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED
: 0) |
2148 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED
: 0);
2150 EXPORT_SYMBOL_GPL(tracing_generic_entry_update
);
2152 struct ring_buffer_event
*
2153 trace_buffer_lock_reserve(struct ring_buffer
*buffer
,
2156 unsigned long flags
, int pc
)
2158 return __trace_buffer_lock_reserve(buffer
, type
, len
, flags
, pc
);
2161 DEFINE_PER_CPU(struct ring_buffer_event
*, trace_buffered_event
);
2162 DEFINE_PER_CPU(int, trace_buffered_event_cnt
);
2163 static int trace_buffered_event_ref
;
2166 * trace_buffered_event_enable - enable buffering events
2168 * When events are being filtered, it is quicker to use a temporary
2169 * buffer to write the event data into if there's a likely chance
2170 * that it will not be committed. The discard of the ring buffer
2171 * is not as fast as committing, and is much slower than copying
2174 * When an event is to be filtered, allocate per cpu buffers to
2175 * write the event data into, and if the event is filtered and discarded
2176 * it is simply dropped, otherwise, the entire data is to be committed
2179 void trace_buffered_event_enable(void)
2181 struct ring_buffer_event
*event
;
2185 WARN_ON_ONCE(!mutex_is_locked(&event_mutex
));
2187 if (trace_buffered_event_ref
++)
2190 for_each_tracing_cpu(cpu
) {
2191 page
= alloc_pages_node(cpu_to_node(cpu
),
2192 GFP_KERNEL
| __GFP_NORETRY
, 0);
2196 event
= page_address(page
);
2197 memset(event
, 0, sizeof(*event
));
2199 per_cpu(trace_buffered_event
, cpu
) = event
;
2202 if (cpu
== smp_processor_id() &&
2203 this_cpu_read(trace_buffered_event
) !=
2204 per_cpu(trace_buffered_event
, cpu
))
2211 trace_buffered_event_disable();
2214 static void enable_trace_buffered_event(void *data
)
2216 /* Probably not needed, but do it anyway */
2218 this_cpu_dec(trace_buffered_event_cnt
);
2221 static void disable_trace_buffered_event(void *data
)
2223 this_cpu_inc(trace_buffered_event_cnt
);
2227 * trace_buffered_event_disable - disable buffering events
2229 * When a filter is removed, it is faster to not use the buffered
2230 * events, and to commit directly into the ring buffer. Free up
2231 * the temp buffers when there are no more users. This requires
2232 * special synchronization with current events.
2234 void trace_buffered_event_disable(void)
2238 WARN_ON_ONCE(!mutex_is_locked(&event_mutex
));
2240 if (WARN_ON_ONCE(!trace_buffered_event_ref
))
2243 if (--trace_buffered_event_ref
)
2247 /* For each CPU, set the buffer as used. */
2248 smp_call_function_many(tracing_buffer_mask
,
2249 disable_trace_buffered_event
, NULL
, 1);
2252 /* Wait for all current users to finish */
2253 synchronize_sched();
2255 for_each_tracing_cpu(cpu
) {
2256 free_page((unsigned long)per_cpu(trace_buffered_event
, cpu
));
2257 per_cpu(trace_buffered_event
, cpu
) = NULL
;
2260 * Make sure trace_buffered_event is NULL before clearing
2261 * trace_buffered_event_cnt.
2266 /* Do the work on each cpu */
2267 smp_call_function_many(tracing_buffer_mask
,
2268 enable_trace_buffered_event
, NULL
, 1);
2272 static struct ring_buffer
*temp_buffer
;
2274 struct ring_buffer_event
*
2275 trace_event_buffer_lock_reserve(struct ring_buffer
**current_rb
,
2276 struct trace_event_file
*trace_file
,
2277 int type
, unsigned long len
,
2278 unsigned long flags
, int pc
)
2280 struct ring_buffer_event
*entry
;
2283 *current_rb
= trace_file
->tr
->trace_buffer
.buffer
;
2285 if (!ring_buffer_time_stamp_abs(*current_rb
) && (trace_file
->flags
&
2286 (EVENT_FILE_FL_SOFT_DISABLED
| EVENT_FILE_FL_FILTERED
)) &&
2287 (entry
= this_cpu_read(trace_buffered_event
))) {
2288 /* Try to use the per cpu buffer first */
2289 val
= this_cpu_inc_return(trace_buffered_event_cnt
);
2291 trace_event_setup(entry
, type
, flags
, pc
);
2292 entry
->array
[0] = len
;
2295 this_cpu_dec(trace_buffered_event_cnt
);
2298 entry
= __trace_buffer_lock_reserve(*current_rb
,
2299 type
, len
, flags
, pc
);
	/*
	 * If tracing is off, but we have triggers enabled
	 * we still need to look at the event data. Use the temp_buffer
	 * to store the trace event for the trigger to use. It's recursion
	 * safe and will not be recorded anywhere.
	 */
2306 if (!entry
&& trace_file
->flags
& EVENT_FILE_FL_TRIGGER_COND
) {
2307 *current_rb
= temp_buffer
;
2308 entry
= __trace_buffer_lock_reserve(*current_rb
,
2309 type
, len
, flags
, pc
);
2313 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve
);
2315 static DEFINE_SPINLOCK(tracepoint_iter_lock
);
2316 static DEFINE_MUTEX(tracepoint_printk_mutex
);
2318 static void output_printk(struct trace_event_buffer
*fbuffer
)
2320 struct trace_event_call
*event_call
;
2321 struct trace_event
*event
;
2322 unsigned long flags
;
2323 struct trace_iterator
*iter
= tracepoint_print_iter
;
2325 /* We should never get here if iter is NULL */
2326 if (WARN_ON_ONCE(!iter
))
2329 event_call
= fbuffer
->trace_file
->event_call
;
2330 if (!event_call
|| !event_call
->event
.funcs
||
2331 !event_call
->event
.funcs
->trace
)
2334 event
= &fbuffer
->trace_file
->event_call
->event
;
2336 spin_lock_irqsave(&tracepoint_iter_lock
, flags
);
2337 trace_seq_init(&iter
->seq
);
2338 iter
->ent
= fbuffer
->entry
;
2339 event_call
->event
.funcs
->trace(iter
, 0, event
);
2340 trace_seq_putc(&iter
->seq
, 0);
2341 printk("%s", iter
->seq
.buffer
);
2343 spin_unlock_irqrestore(&tracepoint_iter_lock
, flags
);
2346 int tracepoint_printk_sysctl(struct ctl_table
*table
, int write
,
2347 void __user
*buffer
, size_t *lenp
,
2350 int save_tracepoint_printk
;
2353 mutex_lock(&tracepoint_printk_mutex
);
2354 save_tracepoint_printk
= tracepoint_printk
;
2356 ret
= proc_dointvec(table
, write
, buffer
, lenp
, ppos
);
	/*
	 * This will force exiting early, as tracepoint_printk
	 * is always zero when tracepoint_print_iter is not allocated
	 */
2362 if (!tracepoint_print_iter
)
2363 tracepoint_printk
= 0;
2365 if (save_tracepoint_printk
== tracepoint_printk
)
2368 if (tracepoint_printk
)
2369 static_key_enable(&tracepoint_printk_key
.key
);
2371 static_key_disable(&tracepoint_printk_key
.key
);
2374 mutex_unlock(&tracepoint_printk_mutex
);
2379 void trace_event_buffer_commit(struct trace_event_buffer
*fbuffer
)
2381 if (static_key_false(&tracepoint_printk_key
.key
))
2382 output_printk(fbuffer
);
2384 event_trigger_unlock_commit(fbuffer
->trace_file
, fbuffer
->buffer
,
2385 fbuffer
->event
, fbuffer
->entry
,
2386 fbuffer
->flags
, fbuffer
->pc
);
2388 EXPORT_SYMBOL_GPL(trace_event_buffer_commit
);
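/*
 * Illustrative sketch (not part of this file): the generated
 * trace_event_raw_event_*() handlers pair the two helpers above roughly
 * like this (irq_flags and pc are whatever the caller sampled):
 *
 *	fbuffer.event = trace_event_buffer_lock_reserve(&fbuffer.buffer,
 *					trace_file, event_type,
 *					sizeof(*entry), irq_flags, pc);
 *	if (!fbuffer.event)
 *		return;
 *	fbuffer.entry = ring_buffer_event_data(fbuffer.event);
 *	... fill in the entry fields ...
 *	trace_event_buffer_commit(&fbuffer);
 *
 * with the fast path in the reserve handing back the per-cpu buffered
 * page when the event is soft disabled or filtered.
 */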
/*
 * Skip 3:
 *
 *   trace_buffer_unlock_commit_regs()
 *   trace_event_buffer_commit()
 *   trace_event_raw_event_xxx()
 */
# define STACK_SKIP 3

void trace_buffer_unlock_commit_regs(struct trace_array *tr,
				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	/*
	 * If regs is not set, then skip the necessary functions.
	 * Note, we can still get here via blktrace, wakeup tracer
	 * and mmiotrace, but that's ok if they lose a function or
	 * two. They are not that meaningful.
	 */
	ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}

/*
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
void
trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
				   struct ring_buffer_event *event)
{
	__buffer_unlock_commit(buffer, event);
}
static void
trace_process_export(struct trace_export *export,
		     struct ring_buffer_event *event)
{
	struct trace_entry *entry;
	unsigned int size = 0;

	entry = ring_buffer_event_data(event);
	size = ring_buffer_event_length(event);
	export->write(export, entry, size);
}

static DEFINE_MUTEX(ftrace_export_lock);

static struct trace_export __rcu *ftrace_exports_list __read_mostly;

static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);

static inline void ftrace_exports_enable(void)
{
	static_branch_enable(&ftrace_exports_enabled);
}

static inline void ftrace_exports_disable(void)
{
	static_branch_disable(&ftrace_exports_enabled);
}

void ftrace_exports(struct ring_buffer_event *event)
{
	struct trace_export *export;

	preempt_disable_notrace();

	export = rcu_dereference_raw_notrace(ftrace_exports_list);
	while (export) {
		trace_process_export(export, event);

		export = rcu_dereference_raw_notrace(export->next);
	}

	preempt_enable_notrace();
}
static inline void
add_trace_export(struct trace_export **list, struct trace_export *export)
{
	rcu_assign_pointer(export->next, *list);
	/*
	 * We are entering export into the list but another
	 * CPU might be walking that list. We need to make sure
	 * the export->next pointer is valid before another CPU sees
	 * the export pointer included into the list.
	 */
	rcu_assign_pointer(*list, export);
}

static inline int
rm_trace_export(struct trace_export **list, struct trace_export *export)
{
	struct trace_export **p;

	for (p = list; *p != NULL; p = &(*p)->next)
		if (*p == export)
			break;

	if (*p != export)
		return -1;

	rcu_assign_pointer(*p, (*p)->next);

	return 0;
}

static inline void
add_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	if (*list == NULL)
		ftrace_exports_enable();

	add_trace_export(list, export);
}

static inline int
rm_ftrace_export(struct trace_export **list, struct trace_export *export)
{
	int ret;

	ret = rm_trace_export(list, export);
	if (*list == NULL)
		ftrace_exports_disable();

	return ret;
}

int register_ftrace_export(struct trace_export *export)
{
	if (WARN_ON_ONCE(!export->write))
		return -1;

	mutex_lock(&ftrace_export_lock);

	add_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return 0;
}
EXPORT_SYMBOL_GPL(register_ftrace_export);

int unregister_ftrace_export(struct trace_export *export)
{
	int ret;

	mutex_lock(&ftrace_export_lock);

	ret = rm_ftrace_export(&ftrace_exports_list, export);

	mutex_unlock(&ftrace_export_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(unregister_ftrace_export);
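/*
 * Illustrative sketch (not part of this file): a subsystem that wants a
 * copy of every exported trace event could hook in roughly like this,
 * where my_write() and my_export are hypothetical names:
 *
 *	static void my_write(struct trace_export *export,
 *			     const void *entry, unsigned int size)
 *	{
 *		push the 'size' bytes at 'entry' to some transport
 *	}
 *
 *	static struct trace_export my_export = {
 *		.write = my_write,
 *	};
 *
 *	register_ftrace_export(&my_export);
 *	...
 *	unregister_ftrace_export(&my_export);
 */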
void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct trace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					    flags, pc);
	if (!event)
		return;
	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->parent_ip = parent_ip;

	if (!call_filter_check_discard(call, entry, buffer, event)) {
		if (static_branch_unlikely(&ftrace_exports_enabled))
			ftrace_exports(event);
		__buffer_unlock_commit(buffer, event);
	}
}
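/*
 * Illustrative sketch (not part of this file): the function tracer's
 * ftrace callback reaches the helper above roughly as
 *
 *	local_save_flags(flags);
 *	trace_function(tr, ip, parent_ip, flags, preempt_count());
 *
 * where ip is the traced function and parent_ip its caller, both handed
 * in by the ftrace trampoline.
 */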
2575 #ifdef CONFIG_STACKTRACE
2577 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2578 struct ftrace_stack
{
2579 unsigned long calls
[FTRACE_STACK_MAX_ENTRIES
];
2582 static DEFINE_PER_CPU(struct ftrace_stack
, ftrace_stack
);
2583 static DEFINE_PER_CPU(int, ftrace_stack_reserve
);
2585 static void __ftrace_trace_stack(struct ring_buffer
*buffer
,
2586 unsigned long flags
,
2587 int skip
, int pc
, struct pt_regs
*regs
)
2589 struct trace_event_call
*call
= &event_kernel_stack
;
2590 struct ring_buffer_event
*event
;
2591 struct stack_entry
*entry
;
2592 struct stack_trace trace
;
2594 int size
= FTRACE_STACK_ENTRIES
;
2596 trace
.nr_entries
= 0;
2600 * Add one, for this function and the call to save_stack_trace()
2601 * If regs is set, then these functions will not be in the way.
2603 #ifndef CONFIG_UNWINDER_ORC
2609 * Since events can happen in NMIs there's no safe way to
2610 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2611 * or NMI comes in, it will just have to use the default
2612 * FTRACE_STACK_SIZE.
2614 preempt_disable_notrace();
2616 use_stack
= __this_cpu_inc_return(ftrace_stack_reserve
);
2618 * We don't need any atomic variables, just a barrier.
2619 * If an interrupt comes in, we don't care, because it would
2620 * have exited and put the counter back to what we want.
2621 * We just need a barrier to keep gcc from moving things
2625 if (use_stack
== 1) {
2626 trace
.entries
= this_cpu_ptr(ftrace_stack
.calls
);
2627 trace
.max_entries
= FTRACE_STACK_MAX_ENTRIES
;
2630 save_stack_trace_regs(regs
, &trace
);
2632 save_stack_trace(&trace
);
2634 if (trace
.nr_entries
> size
)
2635 size
= trace
.nr_entries
;
2637 /* From now on, use_stack is a boolean */
2640 size
*= sizeof(unsigned long);
2642 event
= __trace_buffer_lock_reserve(buffer
, TRACE_STACK
,
2643 sizeof(*entry
) + size
, flags
, pc
);
2646 entry
= ring_buffer_event_data(event
);
2648 memset(&entry
->caller
, 0, size
);
2651 memcpy(&entry
->caller
, trace
.entries
,
2652 trace
.nr_entries
* sizeof(unsigned long));
2654 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
2655 trace
.entries
= entry
->caller
;
2657 save_stack_trace_regs(regs
, &trace
);
2659 save_stack_trace(&trace
);
2662 entry
->size
= trace
.nr_entries
;
2664 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
2665 __buffer_unlock_commit(buffer
, event
);
2668 /* Again, don't let gcc optimize things here */
2670 __this_cpu_dec(ftrace_stack_reserve
);
2671 preempt_enable_notrace();
2675 static inline void ftrace_trace_stack(struct trace_array
*tr
,
2676 struct ring_buffer
*buffer
,
2677 unsigned long flags
,
2678 int skip
, int pc
, struct pt_regs
*regs
)
2680 if (!(tr
->trace_flags
& TRACE_ITER_STACKTRACE
))
2683 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, regs
);
2686 void __trace_stack(struct trace_array
*tr
, unsigned long flags
, int skip
,
2689 struct ring_buffer
*buffer
= tr
->trace_buffer
.buffer
;
2691 if (rcu_is_watching()) {
2692 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, NULL
);
2697 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2698 * but if the above rcu_is_watching() failed, then the NMI
2699 * triggered someplace critical, and rcu_irq_enter() should
2700 * not be called from NMI.
2702 if (unlikely(in_nmi()))
2705 rcu_irq_enter_irqson();
2706 __ftrace_trace_stack(buffer
, flags
, skip
, pc
, NULL
);
2707 rcu_irq_exit_irqson();
2711 * trace_dump_stack - record a stack back trace in the trace buffer
2712 * @skip: Number of functions to skip (helper handlers)
2714 void trace_dump_stack(int skip
)
2716 unsigned long flags
;
2718 if (tracing_disabled
|| tracing_selftest_running
)
2721 local_save_flags(flags
);
2723 #ifndef CONFIG_UNWINDER_ORC
2724 /* Skip 1 to skip this function. */
2727 __ftrace_trace_stack(global_trace
.trace_buffer
.buffer
,
2728 flags
, skip
, preempt_count(), NULL
);
2730 EXPORT_SYMBOL_GPL(trace_dump_stack
);
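/*
 * Illustrative sketch (not part of this file): a driver chasing a rare
 * code path can record where it was reached with
 *
 *	trace_dump_stack(0);
 *
 * which writes a kernel stack backtrace into the trace buffer instead
 * of flooding the console the way dump_stack() would.
 */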
2732 static DEFINE_PER_CPU(int, user_stack_count
);
2735 ftrace_trace_userstack(struct ring_buffer
*buffer
, unsigned long flags
, int pc
)
2737 struct trace_event_call
*call
= &event_user_stack
;
2738 struct ring_buffer_event
*event
;
2739 struct userstack_entry
*entry
;
2740 struct stack_trace trace
;
2742 if (!(global_trace
.trace_flags
& TRACE_ITER_USERSTACKTRACE
))
 * NMIs cannot handle page faults, even with fixups.
 * Saving the user stack can (and often does) fault.
 */
2749 if (unlikely(in_nmi()))
2753 * prevent recursion, since the user stack tracing may
2754 * trigger other kernel events.
2757 if (__this_cpu_read(user_stack_count
))
2760 __this_cpu_inc(user_stack_count
);
2762 event
= __trace_buffer_lock_reserve(buffer
, TRACE_USER_STACK
,
2763 sizeof(*entry
), flags
, pc
);
2765 goto out_drop_count
;
2766 entry
= ring_buffer_event_data(event
);
2768 entry
->tgid
= current
->tgid
;
2769 memset(&entry
->caller
, 0, sizeof(entry
->caller
));
2771 trace
.nr_entries
= 0;
2772 trace
.max_entries
= FTRACE_STACK_ENTRIES
;
2774 trace
.entries
= entry
->caller
;
2776 save_stack_trace_user(&trace
);
2777 if (!call_filter_check_discard(call
, entry
, buffer
, event
))
2778 __buffer_unlock_commit(buffer
, event
);
2781 __this_cpu_dec(user_stack_count
);
2787 static void __trace_userstack(struct trace_array
*tr
, unsigned long flags
)
2789 ftrace_trace_userstack(tr
, flags
, preempt_count());
2793 #endif /* CONFIG_STACKTRACE */
2795 /* created for use with alloc_percpu */
2796 struct trace_buffer_struct
{
2798 char buffer
[4][TRACE_BUF_SIZE
];
2801 static struct trace_buffer_struct
*trace_percpu_buffer
;
 * This allows for lockless recording. If we're nested too deeply, then
 * this returns NULL.
 */
2807 static char *get_trace_buf(void)
2809 struct trace_buffer_struct
*buffer
= this_cpu_ptr(trace_percpu_buffer
);
2811 if (!buffer
|| buffer
->nesting
>= 4)
2816 /* Interrupts must see nesting incremented before we use the buffer */
2818 return &buffer
->buffer
[buffer
->nesting
][0];
2821 static void put_trace_buf(void)
2823 /* Don't let the decrement of nesting leak before this */
2825 this_cpu_dec(trace_percpu_buffer
->nesting
);
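/*
 * Illustrative sketch (not part of this file): users such as
 * trace_vbprintk()/__trace_array_vprintk() pair these helpers roughly as
 *
 *	tbuffer = get_trace_buf();
 *	if (!tbuffer)
 *		goto out_nobuffer;
 *	... format the message into tbuffer ...
 *	put_trace_buf();
 *
 * so up to four nested contexts (task, softirq, irq, NMI) each get
 * their own slot of the per-cpu buffer.
 */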
2828 static int alloc_percpu_trace_buffer(void)
2830 struct trace_buffer_struct
*buffers
;
2832 buffers
= alloc_percpu(struct trace_buffer_struct
);
2833 if (WARN(!buffers
, "Could not allocate percpu trace_printk buffer"))
2836 trace_percpu_buffer
= buffers
;
2840 static int buffers_allocated
;
2842 void trace_printk_init_buffers(void)
2844 if (buffers_allocated
)
2847 if (alloc_percpu_trace_buffer())
2850 /* trace_printk() is for debug use only. Don't use it in production. */
2853 pr_warn("**********************************************************\n");
2854 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2856 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2858 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2859 pr_warn("** unsafe for production use. **\n");
2861 pr_warn("** If you see this message and you are not debugging **\n");
2862 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2864 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2865 pr_warn("**********************************************************\n");
2867 /* Expand the buffers to set size */
2868 tracing_update_buffers();
2870 buffers_allocated
= 1;
2873 * trace_printk_init_buffers() can be called by modules.
2874 * If that happens, then we need to start cmdline recording
2875 * directly here. If the global_trace.buffer is already
2876 * allocated here, then this was called by module code.
2878 if (global_trace
.trace_buffer
.buffer
)
2879 tracing_start_cmdline_record();
2882 void trace_printk_start_comm(void)
2884 /* Start tracing comms if trace printk is set */
2885 if (!buffers_allocated
)
2887 tracing_start_cmdline_record();
2890 static void trace_printk_start_stop_comm(int enabled
)
2892 if (!buffers_allocated
)
2896 tracing_start_cmdline_record();
2898 tracing_stop_cmdline_record();
2902 * trace_vbprintk - write binary msg to tracing buffer
2905 int trace_vbprintk(unsigned long ip
, const char *fmt
, va_list args
)
2907 struct trace_event_call
*call
= &event_bprint
;
2908 struct ring_buffer_event
*event
;
2909 struct ring_buffer
*buffer
;
2910 struct trace_array
*tr
= &global_trace
;
2911 struct bprint_entry
*entry
;
2912 unsigned long flags
;
2914 int len
= 0, size
, pc
;
2916 if (unlikely(tracing_selftest_running
|| tracing_disabled
))
2919 /* Don't pollute graph traces with trace_vprintk internals */
2920 pause_graph_tracing();
2922 pc
= preempt_count();
2923 preempt_disable_notrace();
2925 tbuffer
= get_trace_buf();
2931 len
= vbin_printf((u32
*)tbuffer
, TRACE_BUF_SIZE
/sizeof(int), fmt
, args
);
2933 if (len
> TRACE_BUF_SIZE
/sizeof(int) || len
< 0)
2936 local_save_flags(flags
);
2937 size
= sizeof(*entry
) + sizeof(u32
) * len
;
2938 buffer
= tr
->trace_buffer
.buffer
;
2939 event
= __trace_buffer_lock_reserve(buffer
, TRACE_BPRINT
, size
,
2943 entry
= ring_buffer_event_data(event
);
2947 memcpy(entry
->buf
, tbuffer
, sizeof(u32
) * len
);
2948 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
2949 __buffer_unlock_commit(buffer
, event
);
2950 ftrace_trace_stack(tr
, buffer
, flags
, 6, pc
, NULL
);
2957 preempt_enable_notrace();
2958 unpause_graph_tracing();
2962 EXPORT_SYMBOL_GPL(trace_vbprintk
);
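/*
 * Illustrative sketch (not part of this file): a trace_printk() call
 * with arguments, e.g.
 *
 *	trace_printk("read %d bytes from %s\n", ret, name);
 *
 * is normally routed here via __trace_bprintk(), storing the format
 * pointer plus the binary arguments in the ring buffer; ret and name
 * above are hypothetical.
 */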
2966 __trace_array_vprintk(struct ring_buffer
*buffer
,
2967 unsigned long ip
, const char *fmt
, va_list args
)
2969 struct trace_event_call
*call
= &event_print
;
2970 struct ring_buffer_event
*event
;
2971 int len
= 0, size
, pc
;
2972 struct print_entry
*entry
;
2973 unsigned long flags
;
2976 if (tracing_disabled
|| tracing_selftest_running
)
2979 /* Don't pollute graph traces with trace_vprintk internals */
2980 pause_graph_tracing();
2982 pc
= preempt_count();
2983 preempt_disable_notrace();
2986 tbuffer
= get_trace_buf();
2992 len
= vscnprintf(tbuffer
, TRACE_BUF_SIZE
, fmt
, args
);
2994 local_save_flags(flags
);
2995 size
= sizeof(*entry
) + len
+ 1;
2996 event
= __trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
3000 entry
= ring_buffer_event_data(event
);
3003 memcpy(&entry
->buf
, tbuffer
, len
+ 1);
3004 if (!call_filter_check_discard(call
, entry
, buffer
, event
)) {
3005 __buffer_unlock_commit(buffer
, event
);
3006 ftrace_trace_stack(&global_trace
, buffer
, flags
, 6, pc
, NULL
);
3013 preempt_enable_notrace();
3014 unpause_graph_tracing();
3020 int trace_array_vprintk(struct trace_array
*tr
,
3021 unsigned long ip
, const char *fmt
, va_list args
)
3023 return __trace_array_vprintk(tr
->trace_buffer
.buffer
, ip
, fmt
, args
);
3027 int trace_array_printk(struct trace_array
*tr
,
3028 unsigned long ip
, const char *fmt
, ...)
3033 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
3037 ret
= trace_array_vprintk(tr
, ip
, fmt
, ap
);
3043 int trace_array_printk_buf(struct ring_buffer
*buffer
,
3044 unsigned long ip
, const char *fmt
, ...)
3049 if (!(global_trace
.trace_flags
& TRACE_ITER_PRINTK
))
3053 ret
= __trace_array_vprintk(buffer
, ip
, fmt
, ap
);
3059 int trace_vprintk(unsigned long ip
, const char *fmt
, va_list args
)
3061 return trace_array_vprintk(&global_trace
, ip
, fmt
, args
);
3063 EXPORT_SYMBOL_GPL(trace_vprintk
);
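/*
 * Illustrative sketch (not part of this file): code that owns a trace
 * instance can write into that instance's buffer rather than the global
 * one with something like
 *
 *	trace_array_printk(tr, _THIS_IP_, "state=%d\n", state);
 *
 * where tr and state are hypothetical; plain trace_printk() always
 * targets the global buffer.
 */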
3065 static void trace_iterator_increment(struct trace_iterator
*iter
)
3067 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, iter
->cpu
);
3071 ring_buffer_read(buf_iter
, NULL
);
3074 static struct trace_entry
*
3075 peek_next_entry(struct trace_iterator
*iter
, int cpu
, u64
*ts
,
3076 unsigned long *lost_events
)
3078 struct ring_buffer_event
*event
;
3079 struct ring_buffer_iter
*buf_iter
= trace_buffer_iter(iter
, cpu
);
3082 event
= ring_buffer_iter_peek(buf_iter
, ts
);
3084 event
= ring_buffer_peek(iter
->trace_buffer
->buffer
, cpu
, ts
,
3088 iter
->ent_size
= ring_buffer_event_length(event
);
3089 return ring_buffer_event_data(event
);
3095 static struct trace_entry
*
3096 __find_next_entry(struct trace_iterator
*iter
, int *ent_cpu
,
3097 unsigned long *missing_events
, u64
*ent_ts
)
3099 struct ring_buffer
*buffer
= iter
->trace_buffer
->buffer
;
3100 struct trace_entry
*ent
, *next
= NULL
;
3101 unsigned long lost_events
= 0, next_lost
= 0;
3102 int cpu_file
= iter
->cpu_file
;
3103 u64 next_ts
= 0, ts
;
 * If we are in a per_cpu trace file, don't bother iterating over
 * all CPUs; peek at that CPU directly.
 */
3112 if (cpu_file
> RING_BUFFER_ALL_CPUS
) {
3113 if (ring_buffer_empty_cpu(buffer
, cpu_file
))
3115 ent
= peek_next_entry(iter
, cpu_file
, ent_ts
, missing_events
);
3117 *ent_cpu
= cpu_file
;
3122 for_each_tracing_cpu(cpu
) {
3124 if (ring_buffer_empty_cpu(buffer
, cpu
))
3127 ent
= peek_next_entry(iter
, cpu
, &ts
, &lost_events
);
3130 * Pick the entry with the smallest timestamp:
3132 if (ent
&& (!next
|| ts
< next_ts
)) {
3136 next_lost
= lost_events
;
3137 next_size
= iter
->ent_size
;
3141 iter
->ent_size
= next_size
;
3144 *ent_cpu
= next_cpu
;
3150 *missing_events
= next_lost
;
3155 /* Find the next real entry, without updating the iterator itself */
3156 struct trace_entry
*trace_find_next_entry(struct trace_iterator
*iter
,
3157 int *ent_cpu
, u64
*ent_ts
)
3159 return __find_next_entry(iter
, ent_cpu
, NULL
, ent_ts
);
3162 /* Find the next real entry, and increment the iterator to the next entry */
3163 void *trace_find_next_entry_inc(struct trace_iterator
*iter
)
3165 iter
->ent
= __find_next_entry(iter
, &iter
->cpu
,
3166 &iter
->lost_events
, &iter
->ts
);
3169 trace_iterator_increment(iter
);
3171 return iter
->ent
? iter
: NULL
;
3174 static void trace_consume(struct trace_iterator
*iter
)
3176 ring_buffer_consume(iter
->trace_buffer
->buffer
, iter
->cpu
, &iter
->ts
,
3177 &iter
->lost_events
);
3180 static void *s_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
3182 struct trace_iterator
*iter
= m
->private;
3186 WARN_ON_ONCE(iter
->leftover
);
3190 /* can't go backwards */
3195 ent
= trace_find_next_entry_inc(iter
);
3199 while (ent
&& iter
->idx
< i
)
3200 ent
= trace_find_next_entry_inc(iter
);
3207 void tracing_iter_reset(struct trace_iterator
*iter
, int cpu
)
3209 struct ring_buffer_event
*event
;
3210 struct ring_buffer_iter
*buf_iter
;
3211 unsigned long entries
= 0;
3214 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= 0;
3216 buf_iter
= trace_buffer_iter(iter
, cpu
);
3220 ring_buffer_iter_reset(buf_iter
);
3223 * We could have the case with the max latency tracers
3224 * that a reset never took place on a cpu. This is evident
3225 * by the timestamp being before the start of the buffer.
3227 while ((event
= ring_buffer_iter_peek(buf_iter
, &ts
))) {
3228 if (ts
>= iter
->trace_buffer
->time_start
)
3231 ring_buffer_read(buf_iter
, NULL
);
3234 per_cpu_ptr(iter
->trace_buffer
->data
, cpu
)->skipped_entries
= entries
;
 * The current tracer is copied to avoid global locking
 * all around.
 */
3241 static void *s_start(struct seq_file
*m
, loff_t
*pos
)
3243 struct trace_iterator
*iter
= m
->private;
3244 struct trace_array
*tr
= iter
->tr
;
3245 int cpu_file
= iter
->cpu_file
;
3251 * copy the tracer to avoid using a global lock all around.
3252 * iter->trace is a copy of current_trace, the pointer to the
3253 * name may be used instead of a strcmp(), as iter->trace->name
3254 * will point to the same string as current_trace->name.
3256 mutex_lock(&trace_types_lock
);
3257 if (unlikely(tr
->current_trace
&& iter
->trace
->name
!= tr
->current_trace
->name
))
3258 *iter
->trace
= *tr
->current_trace
;
3259 mutex_unlock(&trace_types_lock
);
3261 #ifdef CONFIG_TRACER_MAX_TRACE
3262 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
3263 return ERR_PTR(-EBUSY
);
3266 if (!iter
->snapshot
)
3267 atomic_inc(&trace_record_taskinfo_disabled
);
3269 if (*pos
!= iter
->pos
) {
3274 if (cpu_file
== RING_BUFFER_ALL_CPUS
) {
3275 for_each_tracing_cpu(cpu
)
3276 tracing_iter_reset(iter
, cpu
);
3278 tracing_iter_reset(iter
, cpu_file
);
3281 for (p
= iter
; p
&& l
< *pos
; p
= s_next(m
, p
, &l
))
3286 * If we overflowed the seq_file before, then we want
3287 * to just reuse the trace_seq buffer again.
3293 p
= s_next(m
, p
, &l
);
3297 trace_event_read_lock();
3298 trace_access_lock(cpu_file
);
3302 static void s_stop(struct seq_file
*m
, void *p
)
3304 struct trace_iterator
*iter
= m
->private;
3306 #ifdef CONFIG_TRACER_MAX_TRACE
3307 if (iter
->snapshot
&& iter
->trace
->use_max_tr
)
3311 if (!iter
->snapshot
)
3312 atomic_dec(&trace_record_taskinfo_disabled
);
3314 trace_access_unlock(iter
->cpu_file
);
3315 trace_event_read_unlock();
3319 get_total_entries(struct trace_buffer
*buf
,
3320 unsigned long *total
, unsigned long *entries
)
3322 unsigned long count
;
3328 for_each_tracing_cpu(cpu
) {
3329 count
= ring_buffer_entries_cpu(buf
->buffer
, cpu
);
3331 * If this buffer has skipped entries, then we hold all
3332 * entries for the trace and we need to ignore the
3333 * ones before the time stamp.
3335 if (per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
) {
3336 count
-= per_cpu_ptr(buf
->data
, cpu
)->skipped_entries
;
3337 /* total is the same as the entries */
3341 ring_buffer_overrun_cpu(buf
->buffer
, cpu
);
3346 static void print_lat_help_header(struct seq_file
*m
)
3348 seq_puts(m
, "# _------=> CPU# \n"
3349 "# / _-----=> irqs-off \n"
3350 "# | / _----=> need-resched \n"
3351 "# || / _---=> hardirq/softirq \n"
3352 "# ||| / _--=> preempt-depth \n"
3354 "# cmd pid ||||| time | caller \n"
3355 "# \\ / ||||| \\ | / \n");
3358 static void print_event_info(struct trace_buffer
*buf
, struct seq_file
*m
)
3360 unsigned long total
;
3361 unsigned long entries
;
3363 get_total_entries(buf
, &total
, &entries
);
3364 seq_printf(m
, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3365 entries
, total
, num_online_cpus());
3369 static void print_func_help_header(struct trace_buffer
*buf
, struct seq_file
*m
,
3372 bool tgid
= flags
& TRACE_ITER_RECORD_TGID
;
3374 print_event_info(buf
, m
);
3376 seq_printf(m
, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid
? "TGID " : "");
3377 seq_printf(m
, "# | | %s | | |\n", tgid
? " | " : "");
3380 static void print_func_help_header_irq(struct trace_buffer
*buf
, struct seq_file
*m
,
3383 bool tgid
= flags
& TRACE_ITER_RECORD_TGID
;
3384 const char tgid_space
[] = " ";
3385 const char space
[] = " ";
3387 seq_printf(m
, "# %s _-----=> irqs-off\n",
3388 tgid
? tgid_space
: space
);
3389 seq_printf(m
, "# %s / _----=> need-resched\n",
3390 tgid
? tgid_space
: space
);
3391 seq_printf(m
, "# %s| / _---=> hardirq/softirq\n",
3392 tgid
? tgid_space
: space
);
3393 seq_printf(m
, "# %s|| / _--=> preempt-depth\n",
3394 tgid
? tgid_space
: space
);
3395 seq_printf(m
, "# %s||| / delay\n",
3396 tgid
? tgid_space
: space
);
3397 seq_printf(m
, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
3398 tgid
? " TGID " : space
);
3399 seq_printf(m
, "# | | %s | |||| | |\n",
3400 tgid
? " | " : space
);
3404 print_trace_header(struct seq_file
*m
, struct trace_iterator
*iter
)
3406 unsigned long sym_flags
= (global_trace
.trace_flags
& TRACE_ITER_SYM_MASK
);
3407 struct trace_buffer
*buf
= iter
->trace_buffer
;
3408 struct trace_array_cpu
*data
= per_cpu_ptr(buf
->data
, buf
->cpu
);
3409 struct tracer
*type
= iter
->trace
;
3410 unsigned long entries
;
3411 unsigned long total
;
3412 const char *name
= "preemption";
3416 get_total_entries(buf
, &total
, &entries
);
3418 seq_printf(m
, "# %s latency trace v1.1.5 on %s\n",
3420 seq_puts(m
, "# -----------------------------------"
3421 "---------------------------------\n");
3422 seq_printf(m
, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3423 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3424 nsecs_to_usecs(data
->saved_latency
),
3428 #if defined(CONFIG_PREEMPT_NONE)
3430 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3432 #elif defined(CONFIG_PREEMPT)
3437 /* These are reserved for later use */
3440 seq_printf(m
, " #P:%d)\n", num_online_cpus());
3444 seq_puts(m
, "# -----------------\n");
3445 seq_printf(m
, "# | task: %.16s-%d "
3446 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3447 data
->comm
, data
->pid
,
3448 from_kuid_munged(seq_user_ns(m
), data
->uid
), data
->nice
,
3449 data
->policy
, data
->rt_priority
);
3450 seq_puts(m
, "# -----------------\n");
3452 if (data
->critical_start
) {
3453 seq_puts(m
, "# => started at: ");
3454 seq_print_ip_sym(&iter
->seq
, data
->critical_start
, sym_flags
);
3455 trace_print_seq(m
, &iter
->seq
);
3456 seq_puts(m
, "\n# => ended at: ");
3457 seq_print_ip_sym(&iter
->seq
, data
->critical_end
, sym_flags
);
3458 trace_print_seq(m
, &iter
->seq
);
3459 seq_puts(m
, "\n#\n");
3465 static void test_cpu_buff_start(struct trace_iterator
*iter
)
3467 struct trace_seq
*s
= &iter
->seq
;
3468 struct trace_array
*tr
= iter
->tr
;
3470 if (!(tr
->trace_flags
& TRACE_ITER_ANNOTATE
))
3473 if (!(iter
->iter_flags
& TRACE_FILE_ANNOTATE
))
3476 if (cpumask_available(iter
->started
) &&
3477 cpumask_test_cpu(iter
->cpu
, iter
->started
))
3480 if (per_cpu_ptr(iter
->trace_buffer
->data
, iter
->cpu
)->skipped_entries
)
3483 if (cpumask_available(iter
->started
))
3484 cpumask_set_cpu(iter
->cpu
, iter
->started
);
3486 /* Don't print started cpu buffer for the first entry of the trace */
3488 trace_seq_printf(s
, "##### CPU %u buffer started ####\n",
3492 static enum print_line_t
print_trace_fmt(struct trace_iterator
*iter
)
3494 struct trace_array
*tr
= iter
->tr
;
3495 struct trace_seq
*s
= &iter
->seq
;
3496 unsigned long sym_flags
= (tr
->trace_flags
& TRACE_ITER_SYM_MASK
);
3497 struct trace_entry
*entry
;
3498 struct trace_event
*event
;
3502 test_cpu_buff_start(iter
);
3504 event
= ftrace_find_event(entry
->type
);
3506 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3507 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
3508 trace_print_lat_context(iter
);
3510 trace_print_context(iter
);
3513 if (trace_seq_has_overflowed(s
))
3514 return TRACE_TYPE_PARTIAL_LINE
;
3517 return event
->funcs
->trace(iter
, sym_flags
, event
);
3519 trace_seq_printf(s
, "Unknown type %d\n", entry
->type
);
3521 return trace_handle_return(s
);
3524 static enum print_line_t
print_raw_fmt(struct trace_iterator
*iter
)
3526 struct trace_array
*tr
= iter
->tr
;
3527 struct trace_seq
*s
= &iter
->seq
;
3528 struct trace_entry
*entry
;
3529 struct trace_event
*event
;
3533 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
)
3534 trace_seq_printf(s
, "%d %d %llu ",
3535 entry
->pid
, iter
->cpu
, iter
->ts
);
3537 if (trace_seq_has_overflowed(s
))
3538 return TRACE_TYPE_PARTIAL_LINE
;
3540 event
= ftrace_find_event(entry
->type
);
3542 return event
->funcs
->raw(iter
, 0, event
);
3544 trace_seq_printf(s
, "%d ?\n", entry
->type
);
3546 return trace_handle_return(s
);
3549 static enum print_line_t
print_hex_fmt(struct trace_iterator
*iter
)
3551 struct trace_array
*tr
= iter
->tr
;
3552 struct trace_seq
*s
= &iter
->seq
;
3553 unsigned char newline
= '\n';
3554 struct trace_entry
*entry
;
3555 struct trace_event
*event
;
3559 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3560 SEQ_PUT_HEX_FIELD(s
, entry
->pid
);
3561 SEQ_PUT_HEX_FIELD(s
, iter
->cpu
);
3562 SEQ_PUT_HEX_FIELD(s
, iter
->ts
);
3563 if (trace_seq_has_overflowed(s
))
3564 return TRACE_TYPE_PARTIAL_LINE
;
3567 event
= ftrace_find_event(entry
->type
);
3569 enum print_line_t ret
= event
->funcs
->hex(iter
, 0, event
);
3570 if (ret
!= TRACE_TYPE_HANDLED
)
3574 SEQ_PUT_FIELD(s
, newline
);
3576 return trace_handle_return(s
);
3579 static enum print_line_t
print_bin_fmt(struct trace_iterator
*iter
)
3581 struct trace_array
*tr
= iter
->tr
;
3582 struct trace_seq
*s
= &iter
->seq
;
3583 struct trace_entry
*entry
;
3584 struct trace_event
*event
;
3588 if (tr
->trace_flags
& TRACE_ITER_CONTEXT_INFO
) {
3589 SEQ_PUT_FIELD(s
, entry
->pid
);
3590 SEQ_PUT_FIELD(s
, iter
->cpu
);
3591 SEQ_PUT_FIELD(s
, iter
->ts
);
3592 if (trace_seq_has_overflowed(s
))
3593 return TRACE_TYPE_PARTIAL_LINE
;
3596 event
= ftrace_find_event(entry
->type
);
3597 return event
? event
->funcs
->binary(iter
, 0, event
) :
3601 int trace_empty(struct trace_iterator
*iter
)
3603 struct ring_buffer_iter
*buf_iter
;
3606 /* If we are looking at one CPU buffer, only check that one */
3607 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
3608 cpu
= iter
->cpu_file
;
3609 buf_iter
= trace_buffer_iter(iter
, cpu
);
3611 if (!ring_buffer_iter_empty(buf_iter
))
3614 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
3620 for_each_tracing_cpu(cpu
) {
3621 buf_iter
= trace_buffer_iter(iter
, cpu
);
3623 if (!ring_buffer_iter_empty(buf_iter
))
3626 if (!ring_buffer_empty_cpu(iter
->trace_buffer
->buffer
, cpu
))
3634 /* Called with trace_event_read_lock() held. */
3635 enum print_line_t
print_trace_line(struct trace_iterator
*iter
)
3637 struct trace_array
*tr
= iter
->tr
;
3638 unsigned long trace_flags
= tr
->trace_flags
;
3639 enum print_line_t ret
;
3641 if (iter
->lost_events
) {
3642 trace_seq_printf(&iter
->seq
, "CPU:%d [LOST %lu EVENTS]\n",
3643 iter
->cpu
, iter
->lost_events
);
3644 if (trace_seq_has_overflowed(&iter
->seq
))
3645 return TRACE_TYPE_PARTIAL_LINE
;
3648 if (iter
->trace
&& iter
->trace
->print_line
) {
3649 ret
= iter
->trace
->print_line(iter
);
3650 if (ret
!= TRACE_TYPE_UNHANDLED
)
3654 if (iter
->ent
->type
== TRACE_BPUTS
&&
3655 trace_flags
& TRACE_ITER_PRINTK
&&
3656 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3657 return trace_print_bputs_msg_only(iter
);
3659 if (iter
->ent
->type
== TRACE_BPRINT
&&
3660 trace_flags
& TRACE_ITER_PRINTK
&&
3661 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3662 return trace_print_bprintk_msg_only(iter
);
3664 if (iter
->ent
->type
== TRACE_PRINT
&&
3665 trace_flags
& TRACE_ITER_PRINTK
&&
3666 trace_flags
& TRACE_ITER_PRINTK_MSGONLY
)
3667 return trace_print_printk_msg_only(iter
);
3669 if (trace_flags
& TRACE_ITER_BIN
)
3670 return print_bin_fmt(iter
);
3672 if (trace_flags
& TRACE_ITER_HEX
)
3673 return print_hex_fmt(iter
);
3675 if (trace_flags
& TRACE_ITER_RAW
)
3676 return print_raw_fmt(iter
);
3678 return print_trace_fmt(iter
);
3681 void trace_latency_header(struct seq_file
*m
)
3683 struct trace_iterator
*iter
= m
->private;
3684 struct trace_array
*tr
= iter
->tr
;
3686 /* print nothing if the buffers are empty */
3687 if (trace_empty(iter
))
3690 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
)
3691 print_trace_header(m
, iter
);
3693 if (!(tr
->trace_flags
& TRACE_ITER_VERBOSE
))
3694 print_lat_help_header(m
);
3697 void trace_default_header(struct seq_file
*m
)
3699 struct trace_iterator
*iter
= m
->private;
3700 struct trace_array
*tr
= iter
->tr
;
3701 unsigned long trace_flags
= tr
->trace_flags
;
3703 if (!(trace_flags
& TRACE_ITER_CONTEXT_INFO
))
3706 if (iter
->iter_flags
& TRACE_FILE_LAT_FMT
) {
3707 /* print nothing if the buffers are empty */
3708 if (trace_empty(iter
))
3710 print_trace_header(m
, iter
);
3711 if (!(trace_flags
& TRACE_ITER_VERBOSE
))
3712 print_lat_help_header(m
);
3714 if (!(trace_flags
& TRACE_ITER_VERBOSE
)) {
3715 if (trace_flags
& TRACE_ITER_IRQ_INFO
)
3716 print_func_help_header_irq(iter
->trace_buffer
,
3719 print_func_help_header(iter
->trace_buffer
, m
,
3725 static void test_ftrace_alive(struct seq_file
*m
)
3727 if (!ftrace_is_dead())
3729 seq_puts(m
, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3730 "# MAY BE MISSING FUNCTION EVENTS\n");
3733 #ifdef CONFIG_TRACER_MAX_TRACE
3734 static void show_snapshot_main_help(struct seq_file
*m
)
3736 seq_puts(m
, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3737 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3738 "# Takes a snapshot of the main buffer.\n"
3739 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
"# (Doesn't have to be '2'; works with any number that\n"
"# is not a '0' or '1')\n");
3744 static void show_snapshot_percpu_help(struct seq_file
*m
)
3746 seq_puts(m
, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3747 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3748 seq_puts(m
, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3749 "# Takes a snapshot of the main buffer for this cpu.\n");
3751 seq_puts(m
, "# echo 1 > snapshot : Not supported with this kernel.\n"
3752 "# Must use main snapshot file to allocate.\n");
3754 seq_puts(m
, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
"# (Doesn't have to be '2'; works with any number that\n"
"# is not a '0' or '1')\n");
3759 static void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
)
3761 if (iter
->tr
->allocated_snapshot
)
3762 seq_puts(m
, "#\n# * Snapshot is allocated *\n#\n");
3764 seq_puts(m
, "#\n# * Snapshot is freed *\n#\n");
3766 seq_puts(m
, "# Snapshot commands:\n");
3767 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
3768 show_snapshot_main_help(m
);
3770 show_snapshot_percpu_help(m
);
3773 /* Should never be called */
3774 static inline void print_snapshot_help(struct seq_file
*m
, struct trace_iterator
*iter
) { }
3777 static int s_show(struct seq_file
*m
, void *v
)
3779 struct trace_iterator
*iter
= v
;
3782 if (iter
->ent
== NULL
) {
3784 seq_printf(m
, "# tracer: %s\n", iter
->trace
->name
);
3786 test_ftrace_alive(m
);
3788 if (iter
->snapshot
&& trace_empty(iter
))
3789 print_snapshot_help(m
, iter
);
3790 else if (iter
->trace
&& iter
->trace
->print_header
)
3791 iter
->trace
->print_header(m
);
3793 trace_default_header(m
);
3795 } else if (iter
->leftover
) {
3797 * If we filled the seq_file buffer earlier, we
3798 * want to just show it now.
3800 ret
= trace_print_seq(m
, &iter
->seq
);
3802 /* ret should this time be zero, but you never know */
3803 iter
->leftover
= ret
;
3806 print_trace_line(iter
);
3807 ret
= trace_print_seq(m
, &iter
->seq
);
3809 * If we overflow the seq_file buffer, then it will
3810 * ask us for this data again at start up.
3812 * ret is 0 if seq_file write succeeded.
3815 iter
->leftover
= ret
;
3822 * Should be used after trace_array_get(), trace_types_lock
3823 * ensures that i_cdev was already initialized.
3825 static inline int tracing_get_cpu(struct inode
*inode
)
3827 if (inode
->i_cdev
) /* See trace_create_cpu_file() */
3828 return (long)inode
->i_cdev
- 1;
3829 return RING_BUFFER_ALL_CPUS
;
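/*
 * Illustrative note (an assumption based on the helper above, not taken
 * from this file): trace_create_cpu_file() stores the cpu number biased
 * by one in i_cdev, so the per_cpu/cpu2/trace file has i_cdev == (void *)3
 * and this returns 2, while a NULL i_cdev (the top level files) maps to
 * RING_BUFFER_ALL_CPUS.
 */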
3832 static const struct seq_operations tracer_seq_ops
= {
3839 static struct trace_iterator
*
3840 __tracing_open(struct inode
*inode
, struct file
*file
, bool snapshot
)
3842 struct trace_array
*tr
= inode
->i_private
;
3843 struct trace_iterator
*iter
;
3846 if (tracing_disabled
)
3847 return ERR_PTR(-ENODEV
);
3849 iter
= __seq_open_private(file
, &tracer_seq_ops
, sizeof(*iter
));
3851 return ERR_PTR(-ENOMEM
);
3853 iter
->buffer_iter
= kcalloc(nr_cpu_ids
, sizeof(*iter
->buffer_iter
),
3855 if (!iter
->buffer_iter
)
3859 * We make a copy of the current tracer to avoid concurrent
3860 * changes on it while we are reading.
3862 mutex_lock(&trace_types_lock
);
3863 iter
->trace
= kzalloc(sizeof(*iter
->trace
), GFP_KERNEL
);
3867 *iter
->trace
= *tr
->current_trace
;
3869 if (!zalloc_cpumask_var(&iter
->started
, GFP_KERNEL
))
3874 #ifdef CONFIG_TRACER_MAX_TRACE
3875 /* Currently only the top directory has a snapshot */
3876 if (tr
->current_trace
->print_max
|| snapshot
)
3877 iter
->trace_buffer
= &tr
->max_buffer
;
3880 iter
->trace_buffer
= &tr
->trace_buffer
;
3881 iter
->snapshot
= snapshot
;
3883 iter
->cpu_file
= tracing_get_cpu(inode
);
3884 mutex_init(&iter
->mutex
);
3886 /* Notify the tracer early; before we stop tracing. */
3887 if (iter
->trace
&& iter
->trace
->open
)
3888 iter
->trace
->open(iter
);
3890 /* Annotate start of buffers if we had overruns */
3891 if (ring_buffer_overruns(iter
->trace_buffer
->buffer
))
3892 iter
->iter_flags
|= TRACE_FILE_ANNOTATE
;
3894 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3895 if (trace_clocks
[tr
->clock_id
].in_ns
)
3896 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
3898 /* stop the trace while dumping if we are not opening "snapshot" */
3899 if (!iter
->snapshot
)
3900 tracing_stop_tr(tr
);
3902 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
) {
3903 for_each_tracing_cpu(cpu
) {
3904 iter
->buffer_iter
[cpu
] =
3905 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3907 ring_buffer_read_prepare_sync();
3908 for_each_tracing_cpu(cpu
) {
3909 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3910 tracing_iter_reset(iter
, cpu
);
3913 cpu
= iter
->cpu_file
;
3914 iter
->buffer_iter
[cpu
] =
3915 ring_buffer_read_prepare(iter
->trace_buffer
->buffer
, cpu
);
3916 ring_buffer_read_prepare_sync();
3917 ring_buffer_read_start(iter
->buffer_iter
[cpu
]);
3918 tracing_iter_reset(iter
, cpu
);
3921 mutex_unlock(&trace_types_lock
);
3926 mutex_unlock(&trace_types_lock
);
3928 kfree(iter
->buffer_iter
);
3930 seq_release_private(inode
, file
);
3931 return ERR_PTR(-ENOMEM
);
3934 int tracing_open_generic(struct inode
*inode
, struct file
*filp
)
3936 if (tracing_disabled
)
3939 filp
->private_data
= inode
->i_private
;
3943 bool tracing_is_disabled(void)
3945 return (tracing_disabled
) ? true: false;
3949 * Open and update trace_array ref count.
3950 * Must have the current trace_array passed to it.
3952 static int tracing_open_generic_tr(struct inode
*inode
, struct file
*filp
)
3954 struct trace_array
*tr
= inode
->i_private
;
3956 if (tracing_disabled
)
3959 if (trace_array_get(tr
) < 0)
3962 filp
->private_data
= inode
->i_private
;
3967 static int tracing_release(struct inode
*inode
, struct file
*file
)
3969 struct trace_array
*tr
= inode
->i_private
;
3970 struct seq_file
*m
= file
->private_data
;
3971 struct trace_iterator
*iter
;
3974 if (!(file
->f_mode
& FMODE_READ
)) {
3975 trace_array_put(tr
);
3979 /* Writes do not use seq_file */
3981 mutex_lock(&trace_types_lock
);
3983 for_each_tracing_cpu(cpu
) {
3984 if (iter
->buffer_iter
[cpu
])
3985 ring_buffer_read_finish(iter
->buffer_iter
[cpu
]);
3988 if (iter
->trace
&& iter
->trace
->close
)
3989 iter
->trace
->close(iter
);
3991 if (!iter
->snapshot
)
3992 /* reenable tracing if it was previously enabled */
3993 tracing_start_tr(tr
);
3995 __trace_array_put(tr
);
3997 mutex_unlock(&trace_types_lock
);
3999 mutex_destroy(&iter
->mutex
);
4000 free_cpumask_var(iter
->started
);
4002 kfree(iter
->buffer_iter
);
4003 seq_release_private(inode
, file
);
4008 static int tracing_release_generic_tr(struct inode
*inode
, struct file
*file
)
4010 struct trace_array
*tr
= inode
->i_private
;
4012 trace_array_put(tr
);
4016 static int tracing_single_release_tr(struct inode
*inode
, struct file
*file
)
4018 struct trace_array
*tr
= inode
->i_private
;
4020 trace_array_put(tr
);
4022 return single_release(inode
, file
);
4025 static int tracing_open(struct inode
*inode
, struct file
*file
)
4027 struct trace_array
*tr
= inode
->i_private
;
4028 struct trace_iterator
*iter
;
4031 if (trace_array_get(tr
) < 0)
4034 /* If this file was open for write, then erase contents */
4035 if ((file
->f_mode
& FMODE_WRITE
) && (file
->f_flags
& O_TRUNC
)) {
4036 int cpu
= tracing_get_cpu(inode
);
4037 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
4039 #ifdef CONFIG_TRACER_MAX_TRACE
4040 if (tr
->current_trace
->print_max
)
4041 trace_buf
= &tr
->max_buffer
;
4044 if (cpu
== RING_BUFFER_ALL_CPUS
)
4045 tracing_reset_online_cpus(trace_buf
);
4047 tracing_reset(trace_buf
, cpu
);
4050 if (file
->f_mode
& FMODE_READ
) {
4051 iter
= __tracing_open(inode
, file
, false);
4053 ret
= PTR_ERR(iter
);
4054 else if (tr
->trace_flags
& TRACE_ITER_LATENCY_FMT
)
4055 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
4059 trace_array_put(tr
);
4065 * Some tracers are not suitable for instance buffers.
4066 * A tracer is always available for the global array (toplevel)
4067 * or if it explicitly states that it is.
4070 trace_ok_for_array(struct tracer
*t
, struct trace_array
*tr
)
4072 return (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) || t
->allow_instances
;
4075 /* Find the next tracer that this trace array may use */
4076 static struct tracer
*
4077 get_tracer_for_array(struct trace_array
*tr
, struct tracer
*t
)
4079 while (t
&& !trace_ok_for_array(t
, tr
))
4086 t_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4088 struct trace_array
*tr
= m
->private;
4089 struct tracer
*t
= v
;
4094 t
= get_tracer_for_array(tr
, t
->next
);
4099 static void *t_start(struct seq_file
*m
, loff_t
*pos
)
4101 struct trace_array
*tr
= m
->private;
4105 mutex_lock(&trace_types_lock
);
4107 t
= get_tracer_for_array(tr
, trace_types
);
4108 for (; t
&& l
< *pos
; t
= t_next(m
, t
, &l
))
4114 static void t_stop(struct seq_file
*m
, void *p
)
4116 mutex_unlock(&trace_types_lock
);
4119 static int t_show(struct seq_file
*m
, void *v
)
4121 struct tracer
*t
= v
;
4126 seq_puts(m
, t
->name
);
4135 static const struct seq_operations show_traces_seq_ops
= {
4142 static int show_traces_open(struct inode
*inode
, struct file
*file
)
4144 struct trace_array
*tr
= inode
->i_private
;
4148 if (tracing_disabled
)
4151 ret
= seq_open(file
, &show_traces_seq_ops
);
4155 m
= file
->private_data
;
4162 tracing_write_stub(struct file
*filp
, const char __user
*ubuf
,
4163 size_t count
, loff_t
*ppos
)
4168 loff_t
tracing_lseek(struct file
*file
, loff_t offset
, int whence
)
4172 if (file
->f_mode
& FMODE_READ
)
4173 ret
= seq_lseek(file
, offset
, whence
);
4175 file
->f_pos
= ret
= 0;
4180 static const struct file_operations tracing_fops
= {
4181 .open
= tracing_open
,
4183 .write
= tracing_write_stub
,
4184 .llseek
= tracing_lseek
,
4185 .release
= tracing_release
,
4188 static const struct file_operations show_traces_fops
= {
4189 .open
= show_traces_open
,
4191 .release
= seq_release
,
4192 .llseek
= seq_lseek
,
4196 tracing_cpumask_read(struct file
*filp
, char __user
*ubuf
,
4197 size_t count
, loff_t
*ppos
)
4199 struct trace_array
*tr
= file_inode(filp
)->i_private
;
4203 len
= snprintf(NULL
, 0, "%*pb\n",
4204 cpumask_pr_args(tr
->tracing_cpumask
)) + 1;
4205 mask_str
= kmalloc(len
, GFP_KERNEL
);
4209 len
= snprintf(mask_str
, len
, "%*pb\n",
4210 cpumask_pr_args(tr
->tracing_cpumask
));
4215 count
= simple_read_from_buffer(ubuf
, count
, ppos
, mask_str
, len
);
4224 tracing_cpumask_write(struct file
*filp
, const char __user
*ubuf
,
4225 size_t count
, loff_t
*ppos
)
4227 struct trace_array
*tr
= file_inode(filp
)->i_private
;
4228 cpumask_var_t tracing_cpumask_new
;
4231 if (!alloc_cpumask_var(&tracing_cpumask_new
, GFP_KERNEL
))
4234 err
= cpumask_parse_user(ubuf
, count
, tracing_cpumask_new
);
4238 local_irq_disable();
4239 arch_spin_lock(&tr
->max_lock
);
4240 for_each_tracing_cpu(cpu
) {
4242 * Increase/decrease the disabled counter if we are
4243 * about to flip a bit in the cpumask:
4245 if (cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
4246 !cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
4247 atomic_inc(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
4248 ring_buffer_record_disable_cpu(tr
->trace_buffer
.buffer
, cpu
);
4250 if (!cpumask_test_cpu(cpu
, tr
->tracing_cpumask
) &&
4251 cpumask_test_cpu(cpu
, tracing_cpumask_new
)) {
4252 atomic_dec(&per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->disabled
);
4253 ring_buffer_record_enable_cpu(tr
->trace_buffer
.buffer
, cpu
);
4256 arch_spin_unlock(&tr
->max_lock
);
4259 cpumask_copy(tr
->tracing_cpumask
, tracing_cpumask_new
);
4260 free_cpumask_var(tracing_cpumask_new
);
4265 free_cpumask_var(tracing_cpumask_new
);
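/*
 * Illustrative sketch (not part of this file): from user space this is
 * reached by writing a hex mask, e.g.
 *
 *	# echo 0f > tracing_cpumask
 *
 * which keeps tracing on CPUs 0-3; every CPU whose bit is being cleared
 * gets its disabled counter bumped and its ring buffer recording
 * stopped, and the reverse happens for bits being set.
 */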
4270 static const struct file_operations tracing_cpumask_fops
= {
4271 .open
= tracing_open_generic_tr
,
4272 .read
= tracing_cpumask_read
,
4273 .write
= tracing_cpumask_write
,
4274 .release
= tracing_release_generic_tr
,
4275 .llseek
= generic_file_llseek
,
4278 static int tracing_trace_options_show(struct seq_file
*m
, void *v
)
4280 struct tracer_opt
*trace_opts
;
4281 struct trace_array
*tr
= m
->private;
4285 mutex_lock(&trace_types_lock
);
4286 tracer_flags
= tr
->current_trace
->flags
->val
;
4287 trace_opts
= tr
->current_trace
->flags
->opts
;
4289 for (i
= 0; trace_options
[i
]; i
++) {
4290 if (tr
->trace_flags
& (1 << i
))
4291 seq_printf(m
, "%s\n", trace_options
[i
]);
4293 seq_printf(m
, "no%s\n", trace_options
[i
]);
4296 for (i
= 0; trace_opts
[i
].name
; i
++) {
4297 if (tracer_flags
& trace_opts
[i
].bit
)
4298 seq_printf(m
, "%s\n", trace_opts
[i
].name
);
4300 seq_printf(m
, "no%s\n", trace_opts
[i
].name
);
4302 mutex_unlock(&trace_types_lock
);
4307 static int __set_tracer_option(struct trace_array
*tr
,
4308 struct tracer_flags
*tracer_flags
,
4309 struct tracer_opt
*opts
, int neg
)
4311 struct tracer
*trace
= tracer_flags
->trace
;
4314 ret
= trace
->set_flag(tr
, tracer_flags
->val
, opts
->bit
, !neg
);
4319 tracer_flags
->val
&= ~opts
->bit
;
4321 tracer_flags
->val
|= opts
->bit
;
4325 /* Try to assign a tracer specific option */
4326 static int set_tracer_option(struct trace_array
*tr
, char *cmp
, int neg
)
4328 struct tracer
*trace
= tr
->current_trace
;
4329 struct tracer_flags
*tracer_flags
= trace
->flags
;
4330 struct tracer_opt
*opts
= NULL
;
4333 for (i
= 0; tracer_flags
->opts
[i
].name
; i
++) {
4334 opts
= &tracer_flags
->opts
[i
];
4336 if (strcmp(cmp
, opts
->name
) == 0)
4337 return __set_tracer_option(tr
, trace
->flags
, opts
, neg
);
4343 /* Some tracers require overwrite to stay enabled */
4344 int trace_keep_overwrite(struct tracer
*tracer
, u32 mask
, int set
)
4346 if (tracer
->enabled
&& (mask
& TRACE_ITER_OVERWRITE
) && !set
)
4352 int set_tracer_flag(struct trace_array
*tr
, unsigned int mask
, int enabled
)
4354 /* do nothing if flag is already set */
4355 if (!!(tr
->trace_flags
& mask
) == !!enabled
)
4358 /* Give the tracer a chance to approve the change */
4359 if (tr
->current_trace
->flag_changed
)
4360 if (tr
->current_trace
->flag_changed(tr
, mask
, !!enabled
))
4364 tr
->trace_flags
|= mask
;
4366 tr
->trace_flags
&= ~mask
;
4368 if (mask
== TRACE_ITER_RECORD_CMD
)
4369 trace_event_enable_cmd_record(enabled
);
4371 if (mask
== TRACE_ITER_RECORD_TGID
) {
4373 tgid_map
= kcalloc(PID_MAX_DEFAULT
+ 1,
4377 tr
->trace_flags
&= ~TRACE_ITER_RECORD_TGID
;
4381 trace_event_enable_tgid_record(enabled
);
4384 if (mask
== TRACE_ITER_EVENT_FORK
)
4385 trace_event_follow_fork(tr
, enabled
);
4387 if (mask
== TRACE_ITER_FUNC_FORK
)
4388 ftrace_pid_follow_fork(tr
, enabled
);
4390 if (mask
== TRACE_ITER_OVERWRITE
) {
4391 ring_buffer_change_overwrite(tr
->trace_buffer
.buffer
, enabled
);
4392 #ifdef CONFIG_TRACER_MAX_TRACE
4393 ring_buffer_change_overwrite(tr
->max_buffer
.buffer
, enabled
);
4397 if (mask
== TRACE_ITER_PRINTK
) {
4398 trace_printk_start_stop_comm(enabled
);
4399 trace_printk_control(enabled
);
4405 static int trace_set_options(struct trace_array
*tr
, char *option
)
4410 size_t orig_len
= strlen(option
);
4412 cmp
= strstrip(option
);
4414 if (strncmp(cmp
, "no", 2) == 0) {
4419 mutex_lock(&trace_types_lock
);
4421 ret
= match_string(trace_options
, -1, cmp
);
4422 /* If no option could be set, test the specific tracer options */
4424 ret
= set_tracer_option(tr
, cmp
, neg
);
4426 ret
= set_tracer_flag(tr
, 1 << ret
, !neg
);
4428 mutex_unlock(&trace_types_lock
);
4431 * If the first trailing whitespace is replaced with '\0' by strstrip,
4432 * turn it back into a space.
4434 if (orig_len
> strlen(option
))
4435 option
[strlen(option
)] = ' ';
4440 static void __init
apply_trace_boot_options(void)
4442 char *buf
= trace_boot_options_buf
;
4446 option
= strsep(&buf
, ",");
4452 trace_set_options(&global_trace
, option
);
4454 /* Put back the comma to allow this to be called again */
4461 tracing_trace_options_write(struct file
*filp
, const char __user
*ubuf
,
4462 size_t cnt
, loff_t
*ppos
)
4464 struct seq_file
*m
= filp
->private_data
;
4465 struct trace_array
*tr
= m
->private;
4469 if (cnt
>= sizeof(buf
))
4472 if (copy_from_user(buf
, ubuf
, cnt
))
4477 ret
= trace_set_options(tr
, buf
);
4486 static int tracing_trace_options_open(struct inode
*inode
, struct file
*file
)
4488 struct trace_array
*tr
= inode
->i_private
;
4491 if (tracing_disabled
)
4494 if (trace_array_get(tr
) < 0)
4497 ret
= single_open(file
, tracing_trace_options_show
, inode
->i_private
);
4499 trace_array_put(tr
);
4504 static const struct file_operations tracing_iter_fops
= {
4505 .open
= tracing_trace_options_open
,
4507 .llseek
= seq_lseek
,
4508 .release
= tracing_single_release_tr
,
4509 .write
= tracing_trace_options_write
,
4512 static const char readme_msg
[] =
4513 "tracing mini-HOWTO:\n\n"
4514 "# echo 0 > tracing_on : quick way to disable tracing\n"
4515 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4516 " Important files:\n"
4517 " trace\t\t\t- The static contents of the buffer\n"
4518 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4519 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4520 " current_tracer\t- function and latency tracers\n"
4521 " available_tracers\t- list of configured tracers for current_tracer\n"
4522 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4523 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
" trace_clock\t\t- change the clock used to order events\n"
4525 " local: Per cpu clock but may not be synced across CPUs\n"
4526 " global: Synced across CPUs but slows tracing down.\n"
4527 " counter: Not a clock, but just an increment\n"
4528 " uptime: Jiffy counter from time of boot\n"
4529 " perf: Same clock that perf events use\n"
4530 #ifdef CONFIG_X86_64
4531 " x86-tsc: TSC cycle counter\n"
"\n timestamp_mode\t- view the mode used to timestamp events\n"
4534 " delta: Delta difference against a buffer-wide timestamp\n"
4535 " absolute: Absolute (standalone) timestamp\n"
4536 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4537 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4538 " tracing_cpumask\t- Limit which CPUs to trace\n"
4539 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4540 "\t\t\t Remove sub-buffer with rmdir\n"
4541 " trace_options\t\t- Set format or modify how tracing happens\n"
4542 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4543 "\t\t\t option name\n"
4544 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4545 #ifdef CONFIG_DYNAMIC_FTRACE
4546 "\n available_filter_functions - list of functions that can be filtered on\n"
4547 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4548 "\t\t\t functions\n"
4549 "\t accepts: func_full_name or glob-matching-pattern\n"
4550 "\t modules: Can select a group via module\n"
4551 "\t Format: :mod:<module-name>\n"
4552 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4553 "\t triggers: a command to perform when function is hit\n"
4554 "\t Format: <function>:<trigger>[:count]\n"
4555 "\t trigger: traceon, traceoff\n"
4556 "\t\t enable_event:<system>:<event>\n"
4557 "\t\t disable_event:<system>:<event>\n"
4558 #ifdef CONFIG_STACKTRACE
4561 #ifdef CONFIG_TRACER_SNAPSHOT
4566 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4567 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4568 "\t The first one will disable tracing every time do_fault is hit\n"
4569 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4570 "\t The first time do trap is hit and it disables tracing, the\n"
4571 "\t counter will decrement to 2. If tracing is already disabled,\n"
4572 "\t the counter will not decrement. It only decrements when the\n"
4573 "\t trigger did work\n"
4574 "\t To remove trigger without count:\n"
4575 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4576 "\t To remove trigger with a count:\n"
4577 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4578 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4579 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4580 "\t modules: Can select a group via module command :mod:\n"
4581 "\t Does not accept triggers\n"
4582 #endif /* CONFIG_DYNAMIC_FTRACE */
4583 #ifdef CONFIG_FUNCTION_TRACER
4584 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4587 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4588 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4589 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4590 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4592 #ifdef CONFIG_TRACER_SNAPSHOT
4593 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4594 "\t\t\t snapshot buffer. Read the contents for more\n"
4595 "\t\t\t information\n"
4597 #ifdef CONFIG_STACK_TRACER
4598 " stack_trace\t\t- Shows the max stack trace when active\n"
4599 " stack_max_size\t- Shows current max stack size that was traced\n"
4600 "\t\t\t Write into this file to reset the max size (trigger a\n"
4601 "\t\t\t new trace)\n"
4602 #ifdef CONFIG_DYNAMIC_FTRACE
4603 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4606 #endif /* CONFIG_STACK_TRACER */
4607 #ifdef CONFIG_DYNAMIC_EVENTS
4608 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4609 "\t\t\t Write into this file to define/undefine new trace events.\n"
4611 #ifdef CONFIG_KPROBE_EVENTS
4612 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4613 "\t\t\t Write into this file to define/undefine new trace events.\n"
4615 #ifdef CONFIG_UPROBE_EVENTS
4616 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4617 "\t\t\t Write into this file to define/undefine new trace events.\n"
4619 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4620 "\t accepts: event-definitions (one definition per line)\n"
4621 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4622 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4623 "\t -:[<group>/]<event>\n"
4624 #ifdef CONFIG_KPROBE_EVENTS
4625 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4626 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4628 #ifdef CONFIG_UPROBE_EVENTS
4629 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4631 "\t args: <name>=fetcharg[:type]\n"
4632 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4633 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4634 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4636 "\t $stack<index>, $stack, $retval, $comm\n"
4638 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4639 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4640 "\t <type>\\[<array-size>\\]\n"
4642 " events/\t\t- Directory containing all trace event subsystems:\n"
4643 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4644 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4645 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4647 " filter\t\t- If set, only events passing filter are traced\n"
4648 " events/<system>/<event>/\t- Directory containing control files for\n"
4650 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4651 " filter\t\t- If set, only events passing filter are traced\n"
4652 " trigger\t\t- If set, a command to perform when event is hit\n"
4653 "\t Format: <trigger>[:count][if <filter>]\n"
4654 "\t trigger: traceon, traceoff\n"
4655 "\t enable_event:<system>:<event>\n"
4656 "\t disable_event:<system>:<event>\n"
4657 #ifdef CONFIG_HIST_TRIGGERS
4658 "\t enable_hist:<system>:<event>\n"
4659 "\t disable_hist:<system>:<event>\n"
4661 #ifdef CONFIG_STACKTRACE
4664 #ifdef CONFIG_TRACER_SNAPSHOT
4667 #ifdef CONFIG_HIST_TRIGGERS
4668 "\t\t hist (see below)\n"
4670 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4671 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4672 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4673 "\t events/block/block_unplug/trigger\n"
4674 "\t The first disables tracing every time block_unplug is hit.\n"
4675 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4676 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4677 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4678 "\t Like function triggers, the counter is only decremented if it\n"
4679 "\t enabled or disabled tracing.\n"
4680 "\t To remove a trigger without a count:\n"
4681 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4682 "\t To remove a trigger with a count:\n"
4683 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4684 "\t Filters can be ignored when removing a trigger.\n"
4685 #ifdef CONFIG_HIST_TRIGGERS
4686 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4687 "\t Format: hist:keys=<field1[,field2,...]>\n"
4688 "\t [:values=<field1[,field2,...]>]\n"
4689 "\t [:sort=<field1[,field2,...]>]\n"
4690 "\t [:size=#entries]\n"
4691 "\t [:pause][:continue][:clear]\n"
4692 "\t [:name=histname1]\n"
4693 "\t [if <filter>]\n\n"
4694 "\t When a matching event is hit, an entry is added to a hash\n"
4695 "\t table using the key(s) and value(s) named, and the value of a\n"
4696 "\t sum called 'hitcount' is incremented. Keys and values\n"
4697 "\t correspond to fields in the event's format description. Keys\n"
4698 "\t can be any field, or the special string 'stacktrace'.\n"
4699 "\t Compound keys consisting of up to two fields can be specified\n"
4700 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4701 "\t fields. Sort keys consisting of up to two fields can be\n"
4702 "\t specified using the 'sort' keyword. The sort direction can\n"
4703 "\t be modified by appending '.descending' or '.ascending' to a\n"
4704 "\t sort field. The 'size' parameter can be used to specify more\n"
4705 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4706 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4707 "\t its histogram data will be shared with other triggers of the\n"
4708 "\t same name, and trigger hits will update this common data.\n\n"
4709 "\t Reading the 'hist' file for the event will dump the hash\n"
4710 "\t table in its entirety to stdout. If there are multiple hist\n"
4711 "\t triggers attached to an event, there will be a table for each\n"
4712 "\t trigger in the output. The table displayed for a named\n"
4713 "\t trigger will be the same as any other instance having the\n"
4714 "\t same name. The default format used to display a given field\n"
4715 "\t can be modified by appending any of the following modifiers\n"
4716 "\t to the field name, as applicable:\n\n"
4717 "\t .hex display a number as a hex value\n"
4718 "\t .sym display an address as a symbol\n"
4719 "\t .sym-offset display an address as a symbol and offset\n"
4720 "\t .execname display a common_pid as a program name\n"
4721 "\t .syscall display a syscall id as a syscall name\n"
4722 "\t .log2 display log2 value rather than raw number\n"
4723 "\t .usecs display a common_timestamp in microseconds\n\n"
4724 "\t The 'pause' parameter can be used to pause an existing hist\n"
4725 "\t trigger or to start a hist trigger but not log any events\n"
4726 "\t until told to do so. 'continue' can be used to start or\n"
4727 "\t restart a paused hist trigger.\n\n"
4728 "\t The 'clear' parameter will clear the contents of a running\n"
4729 "\t hist trigger and leave its current paused/active state\n"
4731 "\t The enable_hist and disable_hist triggers can be used to\n"
4732 "\t have one event conditionally start and stop another event's\n"
4733 "\t already-attached hist trigger. The syntax is analagous to\n"
4734 "\t the enable_event and disable_event triggers.\n"
static ssize_t
tracing_readme_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	return simple_read_from_buffer(ubuf, cnt, ppos,
				       readme_msg, strlen(readme_msg));
}

static const struct file_operations tracing_readme_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_readme_read,
	.llseek		= generic_file_llseek,
};
static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
{
	int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
		if (trace_find_tgid(*ptr))
			return ptr;
	}

	return NULL;
}
static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	if (!tgid_map)
		return NULL;

	v = &tgid_map[0];
	while (l <= *pos) {
		v = saved_tgids_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}
static void saved_tgids_stop(struct seq_file *m, void *v)
{
}

static int saved_tgids_show(struct seq_file *m, void *v)
{
	int pid = (int *)v - tgid_map;

	seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
	return 0;
}
static const struct seq_operations tracing_saved_tgids_seq_ops = {
	.start		= saved_tgids_start,
	.stop		= saved_tgids_stop,
	.next		= saved_tgids_next,
	.show		= saved_tgids_show,
};
static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_tgids_seq_ops);
}

static const struct file_operations tracing_saved_tgids_fops = {
	.open		= tracing_saved_tgids_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
{
	unsigned int *ptr = v;

	if (*pos || m->count)
		ptr++;

	(*pos)++;

	for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
	     ptr++) {
		if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
			continue;
		return ptr;
	}

	return NULL;
}
static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
{
	void *v;
	loff_t l = 0;

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);

	v = &savedcmd->map_cmdline_to_pid[0];
	while (l <= *pos) {
		v = saved_cmdlines_next(m, v, &l);
		if (!v)
			return NULL;
	}

	return v;
}

static void saved_cmdlines_stop(struct seq_file *m, void *v)
{
	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}
static int saved_cmdlines_show(struct seq_file *m, void *v)
{
	char buf[TASK_COMM_LEN];
	unsigned int *pid = v;

	__trace_find_cmdline(*pid, buf);
	seq_printf(m, "%d %s\n", *pid, buf);
	return 0;
}

static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
	.start		= saved_cmdlines_start,
	.next		= saved_cmdlines_next,
	.stop		= saved_cmdlines_stop,
	.show		= saved_cmdlines_show,
};
static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
}

static const struct file_operations tracing_saved_cmdlines_fops = {
	.open		= tracing_saved_cmdlines_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static ssize_t
tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
				 size_t cnt, loff_t *ppos)
{
	char buf[64];
	int r;

	arch_spin_lock(&trace_cmdline_lock);
	r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
	arch_spin_unlock(&trace_cmdline_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}

static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
{
	kfree(s->saved_cmdlines);
	kfree(s->map_cmdline_to_pid);
	kfree(s);
}
static int tracing_resize_saved_cmdlines(unsigned int val)
{
	struct saved_cmdlines_buffer *s, *savedcmd_temp;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	if (allocate_cmdlines_buffer(val, s) < 0) {
		kfree(s);
		return -ENOMEM;
	}

	arch_spin_lock(&trace_cmdline_lock);
	savedcmd_temp = savedcmd;
	savedcmd = s;
	arch_spin_unlock(&trace_cmdline_lock);
	free_saved_cmdlines_buffer(savedcmd_temp);

	return 0;
}

static ssize_t
tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
{
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	/* must have at least 1 entry and at most PID_MAX_DEFAULT entries */
	if (!val || val > PID_MAX_DEFAULT)
		return -EINVAL;

	ret = tracing_resize_saved_cmdlines((unsigned int)val);
	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}

static const struct file_operations tracing_saved_cmdlines_size_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_saved_cmdlines_size_read,
	.write		= tracing_saved_cmdlines_size_write,
};
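/*
 * Illustrative sketch (an assumption about typical usage, not code from this
 * file): saved_cmdlines_size above controls how many PID->comm mappings the
 * tracer caches.  When the cache is too small, trace output falls back to
 * "<...>" for the task name, so a userspace tool may enlarge it before
 * tracing.  Assumes tracefs is mounted at /sys/kernel/tracing.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int set_saved_cmdlines_size(const char *entries)
 *	{
 *		int fd = open("/sys/kernel/tracing/saved_cmdlines_size",
 *			      O_WRONLY);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, entries, strlen(entries));	// e.g. "8192"
 *		close(fd);
 *		return ret < 0 ? -1 : 0;
 *	}
 */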
4971 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4972 static union trace_eval_map_item
*
4973 update_eval_map(union trace_eval_map_item
*ptr
)
4975 if (!ptr
->map
.eval_string
) {
4976 if (ptr
->tail
.next
) {
4977 ptr
= ptr
->tail
.next
;
4978 /* Set ptr to the next real item (skip head) */
4986 static void *eval_map_next(struct seq_file
*m
, void *v
, loff_t
*pos
)
4988 union trace_eval_map_item
*ptr
= v
;
4991 * Paranoid! If ptr points to end, we don't want to increment past it.
4992 * This really should never happen.
4994 ptr
= update_eval_map(ptr
);
4995 if (WARN_ON_ONCE(!ptr
))
5002 ptr
= update_eval_map(ptr
);
5007 static void *eval_map_start(struct seq_file
*m
, loff_t
*pos
)
5009 union trace_eval_map_item
*v
;
5012 mutex_lock(&trace_eval_mutex
);
5014 v
= trace_eval_maps
;
5018 while (v
&& l
< *pos
) {
5019 v
= eval_map_next(m
, v
, &l
);
5025 static void eval_map_stop(struct seq_file
*m
, void *v
)
5027 mutex_unlock(&trace_eval_mutex
);
5030 static int eval_map_show(struct seq_file
*m
, void *v
)
5032 union trace_eval_map_item
*ptr
= v
;
5034 seq_printf(m
, "%s %ld (%s)\n",
5035 ptr
->map
.eval_string
, ptr
->map
.eval_value
,
static const struct seq_operations tracing_eval_map_seq_ops = {
	.start		= eval_map_start,
	.next		= eval_map_next,
	.stop		= eval_map_stop,
	.show		= eval_map_show,
};

static int tracing_eval_map_open(struct inode *inode, struct file *filp)
{
	if (tracing_disabled)
		return -ENODEV;

	return seq_open(filp, &tracing_eval_map_seq_ops);
}

static const struct file_operations tracing_eval_map_fops = {
	.open		= tracing_eval_map_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};
static inline union trace_eval_map_item *
trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
{
	/* Return tail of array given the head */
	return ptr + ptr->head.length + 1;
}
5071 trace_insert_eval_map_file(struct module
*mod
, struct trace_eval_map
**start
,
5074 struct trace_eval_map
**stop
;
5075 struct trace_eval_map
**map
;
5076 union trace_eval_map_item
*map_array
;
5077 union trace_eval_map_item
*ptr
;
5082 * The trace_eval_maps contains the map plus a head and tail item,
5083 * where the head holds the module and length of array, and the
5084 * tail holds a pointer to the next list.
5086 map_array
= kmalloc_array(len
+ 2, sizeof(*map_array
), GFP_KERNEL
);
5088 pr_warn("Unable to allocate trace eval mapping\n");
5092 mutex_lock(&trace_eval_mutex
);
5094 if (!trace_eval_maps
)
5095 trace_eval_maps
= map_array
;
5097 ptr
= trace_eval_maps
;
5099 ptr
= trace_eval_jmp_to_tail(ptr
);
5100 if (!ptr
->tail
.next
)
5102 ptr
= ptr
->tail
.next
;
5105 ptr
->tail
.next
= map_array
;
5107 map_array
->head
.mod
= mod
;
5108 map_array
->head
.length
= len
;
5111 for (map
= start
; (unsigned long)map
< (unsigned long)stop
; map
++) {
5112 map_array
->map
= **map
;
5115 memset(map_array
, 0, sizeof(*map_array
));
5117 mutex_unlock(&trace_eval_mutex
);
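/*
 * Illustrative sketch of the layout built above (not a function in this
 * file): each allocation is laid out as [head][map 0 .. map len-1][tail],
 * with tail.next chaining to the next allocation.  A walk over every saved
 * eval map could therefore look like this:
 *
 *	union trace_eval_map_item *ptr = trace_eval_maps;
 *
 *	while (ptr) {
 *		int i;
 *
 *		for (i = 0; i < ptr->head.length; i++)
 *			pr_info("%s == %ld\n", ptr[i + 1].map.eval_string,
 *				ptr[i + 1].map.eval_value);
 *		ptr = trace_eval_jmp_to_tail(ptr)->tail.next;
 *	}
 */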
static void trace_create_eval_file(struct dentry *d_tracer)
{
	trace_create_file("eval_map", 0444, d_tracer,
			  NULL, &tracing_eval_map_fops);
}

#else /* CONFIG_TRACE_EVAL_MAP_FILE */
static inline void trace_create_eval_file(struct dentry *d_tracer) { }
static inline void trace_insert_eval_map_file(struct module *mod,
			      struct trace_eval_map **start, int len) { }
#endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5132 static void trace_insert_eval_map(struct module
*mod
,
5133 struct trace_eval_map
**start
, int len
)
5135 struct trace_eval_map
**map
;
5142 trace_event_eval_update(map
, len
);
5144 trace_insert_eval_map_file(mod
, start
, len
);
static ssize_t
tracing_set_trace_read(struct file *filp, char __user *ubuf,
		       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[MAX_TRACER_SIZE+2];
	int r;

	mutex_lock(&trace_types_lock);
	r = sprintf(buf, "%s\n", tr->current_trace->name);
	mutex_unlock(&trace_types_lock);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
int tracer_init(struct tracer *t, struct trace_array *tr)
{
	tracing_reset_online_cpus(&tr->trace_buffer);
	return t->init(tr);
}

static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
{
	int cpu;

	for_each_tracing_cpu(cpu)
		per_cpu_ptr(buf->data, cpu)->entries = val;
}
5176 #ifdef CONFIG_TRACER_MAX_TRACE
5177 /* resize @tr's buffer to the size of @size_tr's entries */
5178 static int resize_buffer_duplicate_size(struct trace_buffer
*trace_buf
,
5179 struct trace_buffer
*size_buf
, int cpu_id
)
5183 if (cpu_id
== RING_BUFFER_ALL_CPUS
) {
5184 for_each_tracing_cpu(cpu
) {
5185 ret
= ring_buffer_resize(trace_buf
->buffer
,
5186 per_cpu_ptr(size_buf
->data
, cpu
)->entries
, cpu
);
5189 per_cpu_ptr(trace_buf
->data
, cpu
)->entries
=
5190 per_cpu_ptr(size_buf
->data
, cpu
)->entries
;
5193 ret
= ring_buffer_resize(trace_buf
->buffer
,
5194 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
, cpu_id
);
5196 per_cpu_ptr(trace_buf
->data
, cpu_id
)->entries
=
5197 per_cpu_ptr(size_buf
->data
, cpu_id
)->entries
;
5202 #endif /* CONFIG_TRACER_MAX_TRACE */
5204 static int __tracing_resize_ring_buffer(struct trace_array
*tr
,
5205 unsigned long size
, int cpu
)
5210 * If kernel or user changes the size of the ring buffer
5211 * we use the size that was given, and we can forget about
5212 * expanding it later.
5214 ring_buffer_expanded
= true;
5216 /* May be called before buffers are initialized */
5217 if (!tr
->trace_buffer
.buffer
)
5220 ret
= ring_buffer_resize(tr
->trace_buffer
.buffer
, size
, cpu
);
5224 #ifdef CONFIG_TRACER_MAX_TRACE
5225 if (!(tr
->flags
& TRACE_ARRAY_FL_GLOBAL
) ||
5226 !tr
->current_trace
->use_max_tr
)
5229 ret
= ring_buffer_resize(tr
->max_buffer
.buffer
, size
, cpu
);
5231 int r
= resize_buffer_duplicate_size(&tr
->trace_buffer
,
5232 &tr
->trace_buffer
, cpu
);
5235 * AARGH! We are left with different
5236 * size max buffer!!!!
5237 * The max buffer is our "snapshot" buffer.
5238 * When a tracer needs a snapshot (one of the
5239 * latency tracers), it swaps the max buffer
5240 * with the saved snap shot. We succeeded to
5241 * update the size of the main buffer, but failed to
5242 * update the size of the max buffer. But when we tried
5243 * to reset the main buffer to the original size, we
5244 * failed there too. This is very unlikely to
5245 * happen, but if it does, warn and kill all
5249 tracing_disabled
= 1;
5254 if (cpu
== RING_BUFFER_ALL_CPUS
)
5255 set_buffer_entries(&tr
->max_buffer
, size
);
5257 per_cpu_ptr(tr
->max_buffer
.data
, cpu
)->entries
= size
;
5260 #endif /* CONFIG_TRACER_MAX_TRACE */
5262 if (cpu
== RING_BUFFER_ALL_CPUS
)
5263 set_buffer_entries(&tr
->trace_buffer
, size
);
5265 per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
= size
;
5270 static ssize_t
tracing_resize_ring_buffer(struct trace_array
*tr
,
5271 unsigned long size
, int cpu_id
)
5275 mutex_lock(&trace_types_lock
);
5277 if (cpu_id
!= RING_BUFFER_ALL_CPUS
) {
5278 /* make sure, this cpu is enabled in the mask */
5279 if (!cpumask_test_cpu(cpu_id
, tracing_buffer_mask
)) {
5285 ret
= __tracing_resize_ring_buffer(tr
, size
, cpu_id
);
5290 mutex_unlock(&trace_types_lock
);
5297 * tracing_update_buffers - used by tracing facility to expand ring buffers
5299 * To save on memory when the tracing is never used on a system with it
5300 * configured in. The ring buffers are set to a minimum size. But once
5301 * a user starts to use the tracing facility, then they need to grow
5302 * to their default size.
5304 * This function is to be called when a tracer is about to be used.
5306 int tracing_update_buffers(void)
5310 mutex_lock(&trace_types_lock
);
5311 if (!ring_buffer_expanded
)
5312 ret
= __tracing_resize_ring_buffer(&global_trace
, trace_buf_size
,
5313 RING_BUFFER_ALL_CPUS
);
5314 mutex_unlock(&trace_types_lock
);
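/*
 * Illustrative sketch (an assumption about a typical caller, not code from
 * this file): anything that is about to start generating trace data calls
 * tracing_update_buffers() first so the minimal boot-time buffers are grown
 * to their real size exactly once.  The hypothetical enable path below only
 * shows the calling pattern:
 *
 *	int enable_my_tracing(void)
 *	{
 *		int ret;
 *
 *		ret = tracing_update_buffers();	// no-op once already expanded
 *		if (ret < 0)
 *			return ret;
 *		// ...now safe to enable events or a tracer...
 *		return 0;
 *	}
 */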
5319 struct trace_option_dentry
;
5322 create_trace_option_files(struct trace_array
*tr
, struct tracer
*tracer
);
5325 * Used to clear out the tracer before deletion of an instance.
5326 * Must have trace_types_lock held.
5328 static void tracing_set_nop(struct trace_array
*tr
)
5330 if (tr
->current_trace
== &nop_trace
)
5333 tr
->current_trace
->enabled
--;
5335 if (tr
->current_trace
->reset
)
5336 tr
->current_trace
->reset(tr
);
5338 tr
->current_trace
= &nop_trace
;
5341 static void add_tracer_options(struct trace_array
*tr
, struct tracer
*t
)
5343 /* Only enable if the directory has been created already. */
5347 create_trace_option_files(tr
, t
);
5350 static int tracing_set_tracer(struct trace_array
*tr
, const char *buf
)
5353 #ifdef CONFIG_TRACER_MAX_TRACE
5358 mutex_lock(&trace_types_lock
);
5360 if (!ring_buffer_expanded
) {
5361 ret
= __tracing_resize_ring_buffer(tr
, trace_buf_size
,
5362 RING_BUFFER_ALL_CPUS
);
5368 for (t
= trace_types
; t
; t
= t
->next
) {
5369 if (strcmp(t
->name
, buf
) == 0)
5376 if (t
== tr
->current_trace
)
5379 /* Some tracers won't work on kernel command line */
5380 if (system_state
< SYSTEM_RUNNING
&& t
->noboot
) {
5381 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5386 /* Some tracers are only allowed for the top level buffer */
5387 if (!trace_ok_for_array(t
, tr
)) {
5392 /* If trace pipe files are being read, we can't change the tracer */
5393 if (tr
->current_trace
->ref
) {
5398 trace_branch_disable();
5400 tr
->current_trace
->enabled
--;
5402 if (tr
->current_trace
->reset
)
5403 tr
->current_trace
->reset(tr
);
5405 /* Current trace needs to be nop_trace before synchronize_sched */
5406 tr
->current_trace
= &nop_trace
;
5408 #ifdef CONFIG_TRACER_MAX_TRACE
5409 had_max_tr
= tr
->allocated_snapshot
;
5411 if (had_max_tr
&& !t
->use_max_tr
) {
5413 * We need to make sure that the update_max_tr sees that
5414 * current_trace changed to nop_trace to keep it from
5415 * swapping the buffers after we resize it.
5416 * The update_max_tr is called from interrupts disabled
5417 * so a synchronized_sched() is sufficient.
5419 synchronize_sched();
5424 #ifdef CONFIG_TRACER_MAX_TRACE
5425 if (t
->use_max_tr
&& !had_max_tr
) {
5426 ret
= tracing_alloc_snapshot_instance(tr
);
5433 ret
= tracer_init(t
, tr
);
5438 tr
->current_trace
= t
;
5439 tr
->current_trace
->enabled
++;
5440 trace_branch_enable(tr
);
5442 mutex_unlock(&trace_types_lock
);
5448 tracing_set_trace_write(struct file
*filp
, const char __user
*ubuf
,
5449 size_t cnt
, loff_t
*ppos
)
5451 struct trace_array
*tr
= filp
->private_data
;
5452 char buf
[MAX_TRACER_SIZE
+1];
5459 if (cnt
> MAX_TRACER_SIZE
)
5460 cnt
= MAX_TRACER_SIZE
;
5462 if (copy_from_user(buf
, ubuf
, cnt
))
5467 /* strip ending whitespace. */
5468 for (i
= cnt
- 1; i
> 0 && isspace(buf
[i
]); i
--)
5471 err
= tracing_set_tracer(tr
, buf
);
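/*
 * Illustrative sketch (not part of this file): tracing_set_trace_write()
 * above is what runs when userspace writes a tracer name into the
 * "current_tracer" file.  Assumes tracefs at /sys/kernel/tracing and that
 * the requested tracer is built into the running kernel.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int select_tracer(const char *name)
 *	{
 *		int fd = open("/sys/kernel/tracing/current_tracer", O_WRONLY);
 *		int ret = -1;
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, name, strlen(name)) > 0)	// e.g. "function_graph"
 *			ret = 0;
 *		close(fd);
 *		return ret;
 *	}
 */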
5481 tracing_nsecs_read(unsigned long *ptr
, char __user
*ubuf
,
5482 size_t cnt
, loff_t
*ppos
)
5487 r
= snprintf(buf
, sizeof(buf
), "%ld\n",
5488 *ptr
== (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr
));
5489 if (r
> sizeof(buf
))
5491 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
5495 tracing_nsecs_write(unsigned long *ptr
, const char __user
*ubuf
,
5496 size_t cnt
, loff_t
*ppos
)
5501 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
5511 tracing_thresh_read(struct file
*filp
, char __user
*ubuf
,
5512 size_t cnt
, loff_t
*ppos
)
5514 return tracing_nsecs_read(&tracing_thresh
, ubuf
, cnt
, ppos
);
5518 tracing_thresh_write(struct file
*filp
, const char __user
*ubuf
,
5519 size_t cnt
, loff_t
*ppos
)
5521 struct trace_array
*tr
= filp
->private_data
;
5524 mutex_lock(&trace_types_lock
);
5525 ret
= tracing_nsecs_write(&tracing_thresh
, ubuf
, cnt
, ppos
);
5529 if (tr
->current_trace
->update_thresh
) {
5530 ret
= tr
->current_trace
->update_thresh(tr
);
5537 mutex_unlock(&trace_types_lock
);
5542 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5545 tracing_max_lat_read(struct file
*filp
, char __user
*ubuf
,
5546 size_t cnt
, loff_t
*ppos
)
5548 return tracing_nsecs_read(filp
->private_data
, ubuf
, cnt
, ppos
);
5552 tracing_max_lat_write(struct file
*filp
, const char __user
*ubuf
,
5553 size_t cnt
, loff_t
*ppos
)
5555 return tracing_nsecs_write(filp
->private_data
, ubuf
, cnt
, ppos
);
5560 static int tracing_open_pipe(struct inode
*inode
, struct file
*filp
)
5562 struct trace_array
*tr
= inode
->i_private
;
5563 struct trace_iterator
*iter
;
5566 if (tracing_disabled
)
5569 if (trace_array_get(tr
) < 0)
5572 mutex_lock(&trace_types_lock
);
5574 /* create a buffer to store the information to pass to userspace */
5575 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
5578 __trace_array_put(tr
);
5582 trace_seq_init(&iter
->seq
);
5583 iter
->trace
= tr
->current_trace
;
5585 if (!alloc_cpumask_var(&iter
->started
, GFP_KERNEL
)) {
5590 /* trace pipe does not show start of buffer */
5591 cpumask_setall(iter
->started
);
5593 if (tr
->trace_flags
& TRACE_ITER_LATENCY_FMT
)
5594 iter
->iter_flags
|= TRACE_FILE_LAT_FMT
;
5596 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5597 if (trace_clocks
[tr
->clock_id
].in_ns
)
5598 iter
->iter_flags
|= TRACE_FILE_TIME_IN_NS
;
5601 iter
->trace_buffer
= &tr
->trace_buffer
;
5602 iter
->cpu_file
= tracing_get_cpu(inode
);
5603 mutex_init(&iter
->mutex
);
5604 filp
->private_data
= iter
;
5606 if (iter
->trace
->pipe_open
)
5607 iter
->trace
->pipe_open(iter
);
5609 nonseekable_open(inode
, filp
);
5611 tr
->current_trace
->ref
++;
5613 mutex_unlock(&trace_types_lock
);
5619 __trace_array_put(tr
);
5620 mutex_unlock(&trace_types_lock
);
5624 static int tracing_release_pipe(struct inode
*inode
, struct file
*file
)
5626 struct trace_iterator
*iter
= file
->private_data
;
5627 struct trace_array
*tr
= inode
->i_private
;
5629 mutex_lock(&trace_types_lock
);
5631 tr
->current_trace
->ref
--;
5633 if (iter
->trace
->pipe_close
)
5634 iter
->trace
->pipe_close(iter
);
5636 mutex_unlock(&trace_types_lock
);
5638 free_cpumask_var(iter
->started
);
5639 mutex_destroy(&iter
->mutex
);
5642 trace_array_put(tr
);
5648 trace_poll(struct trace_iterator
*iter
, struct file
*filp
, poll_table
*poll_table
)
5650 struct trace_array
*tr
= iter
->tr
;
5652 /* Iterators are static, they should be filled or empty */
5653 if (trace_buffer_iter(iter
, iter
->cpu_file
))
5654 return EPOLLIN
| EPOLLRDNORM
;
5656 if (tr
->trace_flags
& TRACE_ITER_BLOCK
)
5658 * Always select as readable when in blocking mode
5660 return EPOLLIN
| EPOLLRDNORM
;
5662 return ring_buffer_poll_wait(iter
->trace_buffer
->buffer
, iter
->cpu_file
,
5667 tracing_poll_pipe(struct file
*filp
, poll_table
*poll_table
)
5669 struct trace_iterator
*iter
= filp
->private_data
;
5671 return trace_poll(iter
, filp
, poll_table
);
5674 /* Must be called with iter->mutex held. */
5675 static int tracing_wait_pipe(struct file
*filp
)
5677 struct trace_iterator
*iter
= filp
->private_data
;
5680 while (trace_empty(iter
)) {
5682 if ((filp
->f_flags
& O_NONBLOCK
)) {
5687 * We block until we read something and tracing is disabled.
5688 * We still block if tracing is disabled, but we have never
5689 * read anything. This allows a user to cat this file, and
5690 * then enable tracing. But after we have read something,
5691 * we give an EOF when tracing is again disabled.
5693 * iter->pos will be 0 if we haven't read anything.
5695 if (!tracer_tracing_is_on(iter
->tr
) && iter
->pos
)
5698 mutex_unlock(&iter
->mutex
);
5700 ret
= wait_on_pipe(iter
, 0);
5702 mutex_lock(&iter
->mutex
);
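/*
 * Illustrative sketch (not part of this file) of the userspace side of the
 * blocking behaviour described above: a reader of "trace_pipe" sleeps in
 * tracing_wait_pipe() until data arrives, unless it opened the file with
 * O_NONBLOCK.  Assumes tracefs at /sys/kernel/tracing.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		ssize_t n;
 *		int fd = open("/sys/kernel/tracing/trace_pipe", O_RDONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		while ((n = read(fd, buf, sizeof(buf))) > 0)	// blocks for data
 *			fwrite(buf, 1, n, stdout);
 *		close(fd);
 *		return 0;
 *	}
 */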
5715 tracing_read_pipe(struct file
*filp
, char __user
*ubuf
,
5716 size_t cnt
, loff_t
*ppos
)
5718 struct trace_iterator
*iter
= filp
->private_data
;
5722 * Avoid more than one consumer on a single file descriptor
5723 * This is just a matter of traces coherency, the ring buffer itself
5726 mutex_lock(&iter
->mutex
);
5728 /* return any leftover data */
5729 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
5733 trace_seq_init(&iter
->seq
);
5735 if (iter
->trace
->read
) {
5736 sret
= iter
->trace
->read(iter
, filp
, ubuf
, cnt
, ppos
);
5742 sret
= tracing_wait_pipe(filp
);
5746 /* stop when tracing is finished */
5747 if (trace_empty(iter
)) {
5752 if (cnt
>= PAGE_SIZE
)
5753 cnt
= PAGE_SIZE
- 1;
5755 /* reset all but tr, trace, and overruns */
5756 memset(&iter
->seq
, 0,
5757 sizeof(struct trace_iterator
) -
5758 offsetof(struct trace_iterator
, seq
));
5759 cpumask_clear(iter
->started
);
5762 trace_event_read_lock();
5763 trace_access_lock(iter
->cpu_file
);
5764 while (trace_find_next_entry_inc(iter
) != NULL
) {
5765 enum print_line_t ret
;
5766 int save_len
= iter
->seq
.seq
.len
;
5768 ret
= print_trace_line(iter
);
5769 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
5770 /* don't print partial lines */
5771 iter
->seq
.seq
.len
= save_len
;
5774 if (ret
!= TRACE_TYPE_NO_CONSUME
)
5775 trace_consume(iter
);
5777 if (trace_seq_used(&iter
->seq
) >= cnt
)
5781 * Setting the full flag means we reached the trace_seq buffer
5782 * size and we should leave by partial output condition above.
5783 * One of the trace_seq_* functions is not used properly.
5785 WARN_ONCE(iter
->seq
.full
, "full flag set for trace type %d",
5788 trace_access_unlock(iter
->cpu_file
);
5789 trace_event_read_unlock();
5791 /* Now copy what we have to the user */
5792 sret
= trace_seq_to_user(&iter
->seq
, ubuf
, cnt
);
5793 if (iter
->seq
.seq
.readpos
>= trace_seq_used(&iter
->seq
))
5794 trace_seq_init(&iter
->seq
);
5797 * If there was nothing to send to user, in spite of consuming trace
5798 * entries, go back to wait for more entries.
5804 mutex_unlock(&iter
->mutex
);
5809 static void tracing_spd_release_pipe(struct splice_pipe_desc
*spd
,
5812 __free_page(spd
->pages
[idx
]);
5815 static const struct pipe_buf_operations tracing_pipe_buf_ops
= {
5817 .confirm
= generic_pipe_buf_confirm
,
5818 .release
= generic_pipe_buf_release
,
5819 .steal
= generic_pipe_buf_steal
,
5820 .get
= generic_pipe_buf_get
,
5824 tracing_fill_pipe_page(size_t rem
, struct trace_iterator
*iter
)
5830 /* Seq buffer is page-sized, exactly what we need. */
5832 save_len
= iter
->seq
.seq
.len
;
5833 ret
= print_trace_line(iter
);
5835 if (trace_seq_has_overflowed(&iter
->seq
)) {
5836 iter
->seq
.seq
.len
= save_len
;
5841 * This should not be hit, because it should only
5842 * be set if the iter->seq overflowed. But check it
5843 * anyway to be safe.
5845 if (ret
== TRACE_TYPE_PARTIAL_LINE
) {
5846 iter
->seq
.seq
.len
= save_len
;
5850 count
= trace_seq_used(&iter
->seq
) - save_len
;
5853 iter
->seq
.seq
.len
= save_len
;
5857 if (ret
!= TRACE_TYPE_NO_CONSUME
)
5858 trace_consume(iter
);
5860 if (!trace_find_next_entry_inc(iter
)) {
5870 static ssize_t
tracing_splice_read_pipe(struct file
*filp
,
5872 struct pipe_inode_info
*pipe
,
5876 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
5877 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
5878 struct trace_iterator
*iter
= filp
->private_data
;
5879 struct splice_pipe_desc spd
= {
5881 .partial
= partial_def
,
5882 .nr_pages
= 0, /* This gets updated below. */
5883 .nr_pages_max
= PIPE_DEF_BUFFERS
,
5884 .ops
= &tracing_pipe_buf_ops
,
5885 .spd_release
= tracing_spd_release_pipe
,
5891 if (splice_grow_spd(pipe
, &spd
))
5894 mutex_lock(&iter
->mutex
);
5896 if (iter
->trace
->splice_read
) {
5897 ret
= iter
->trace
->splice_read(iter
, filp
,
5898 ppos
, pipe
, len
, flags
);
5903 ret
= tracing_wait_pipe(filp
);
5907 if (!iter
->ent
&& !trace_find_next_entry_inc(iter
)) {
5912 trace_event_read_lock();
5913 trace_access_lock(iter
->cpu_file
);
5915 /* Fill as many pages as possible. */
5916 for (i
= 0, rem
= len
; i
< spd
.nr_pages_max
&& rem
; i
++) {
5917 spd
.pages
[i
] = alloc_page(GFP_KERNEL
);
5921 rem
= tracing_fill_pipe_page(rem
, iter
);
5923 /* Copy the data into the page, so we can start over. */
5924 ret
= trace_seq_to_buffer(&iter
->seq
,
5925 page_address(spd
.pages
[i
]),
5926 trace_seq_used(&iter
->seq
));
5928 __free_page(spd
.pages
[i
]);
5931 spd
.partial
[i
].offset
= 0;
5932 spd
.partial
[i
].len
= trace_seq_used(&iter
->seq
);
5934 trace_seq_init(&iter
->seq
);
5937 trace_access_unlock(iter
->cpu_file
);
5938 trace_event_read_unlock();
5939 mutex_unlock(&iter
->mutex
);
5944 ret
= splice_to_pipe(pipe
, &spd
);
5948 splice_shrink_spd(&spd
);
5952 mutex_unlock(&iter
->mutex
);
5957 tracing_entries_read(struct file
*filp
, char __user
*ubuf
,
5958 size_t cnt
, loff_t
*ppos
)
5960 struct inode
*inode
= file_inode(filp
);
5961 struct trace_array
*tr
= inode
->i_private
;
5962 int cpu
= tracing_get_cpu(inode
);
5967 mutex_lock(&trace_types_lock
);
5969 if (cpu
== RING_BUFFER_ALL_CPUS
) {
5970 int cpu
, buf_size_same
;
5975 /* check if all cpu sizes are same */
5976 for_each_tracing_cpu(cpu
) {
5977 /* fill in the size from first enabled cpu */
5979 size
= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
;
5980 if (size
!= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
) {
5986 if (buf_size_same
) {
5987 if (!ring_buffer_expanded
)
5988 r
= sprintf(buf
, "%lu (expanded: %lu)\n",
5990 trace_buf_size
>> 10);
5992 r
= sprintf(buf
, "%lu\n", size
>> 10);
5994 r
= sprintf(buf
, "X\n");
5996 r
= sprintf(buf
, "%lu\n", per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10);
5998 mutex_unlock(&trace_types_lock
);
6000 ret
= simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6005 tracing_entries_write(struct file
*filp
, const char __user
*ubuf
,
6006 size_t cnt
, loff_t
*ppos
)
6008 struct inode
*inode
= file_inode(filp
);
6009 struct trace_array
*tr
= inode
->i_private
;
6013 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6017 /* must have at least 1 entry */
6021 /* value is in KB */
6023 ret
= tracing_resize_ring_buffer(tr
, val
, tracing_get_cpu(inode
));
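/*
 * Illustrative sketch (not part of this file): tracing_entries_write()
 * above backs the "buffer_size_kb" file, so resizing the ring buffer from
 * userspace is just a write of the new per-CPU size in kilobytes.  Writing
 * to the top-level file resizes all CPUs; per_cpu/cpuN/buffer_size_kb
 * resizes a single CPU.  Assumes tracefs at /sys/kernel/tracing.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static int resize_all_cpus_kb(const char *kb)
 *	{
 *		int fd = open("/sys/kernel/tracing/buffer_size_kb", O_WRONLY);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, kb, strlen(kb));	// e.g. "4096"
 *		close(fd);
 *		return ret < 0 ? -1 : 0;
 *	}
 */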
6033 tracing_total_entries_read(struct file
*filp
, char __user
*ubuf
,
6034 size_t cnt
, loff_t
*ppos
)
6036 struct trace_array
*tr
= filp
->private_data
;
6039 unsigned long size
= 0, expanded_size
= 0;
6041 mutex_lock(&trace_types_lock
);
6042 for_each_tracing_cpu(cpu
) {
6043 size
+= per_cpu_ptr(tr
->trace_buffer
.data
, cpu
)->entries
>> 10;
6044 if (!ring_buffer_expanded
)
6045 expanded_size
+= trace_buf_size
>> 10;
6047 if (ring_buffer_expanded
)
6048 r
= sprintf(buf
, "%lu\n", size
);
6050 r
= sprintf(buf
, "%lu (expanded: %lu)\n", size
, expanded_size
);
6051 mutex_unlock(&trace_types_lock
);
6053 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
6057 tracing_free_buffer_write(struct file
*filp
, const char __user
*ubuf
,
6058 size_t cnt
, loff_t
*ppos
)
6061 * There is no need to read what the user has written, this function
6062 * is just to make sure that there is no error when "echo" is used
6071 tracing_free_buffer_release(struct inode
*inode
, struct file
*filp
)
6073 struct trace_array
*tr
= inode
->i_private
;
6075 /* disable tracing ? */
6076 if (tr
->trace_flags
& TRACE_ITER_STOP_ON_FREE
)
6077 tracer_tracing_off(tr
);
6078 /* resize the ring buffer to 0 */
6079 tracing_resize_ring_buffer(tr
, 0, RING_BUFFER_ALL_CPUS
);
6081 trace_array_put(tr
);
6087 tracing_mark_write(struct file
*filp
, const char __user
*ubuf
,
6088 size_t cnt
, loff_t
*fpos
)
6090 struct trace_array
*tr
= filp
->private_data
;
6091 struct ring_buffer_event
*event
;
6092 enum event_trigger_type tt
= ETT_NONE
;
6093 struct ring_buffer
*buffer
;
6094 struct print_entry
*entry
;
6095 unsigned long irq_flags
;
6096 const char faulted
[] = "<faulted>";
6101 /* Used in tracing_mark_raw_write() as well */
6102 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6104 if (tracing_disabled
)
6107 if (!(tr
->trace_flags
& TRACE_ITER_MARKERS
))
6110 if (cnt
> TRACE_BUF_SIZE
)
6111 cnt
= TRACE_BUF_SIZE
;
6113 BUILD_BUG_ON(TRACE_BUF_SIZE
>= PAGE_SIZE
);
6115 local_save_flags(irq_flags
);
6116 size
= sizeof(*entry
) + cnt
+ 2; /* add '\0' and possible '\n' */
6118 /* If less than "<faulted>", then make sure we can still add that */
6119 if (cnt
< FAULTED_SIZE
)
6120 size
+= FAULTED_SIZE
- cnt
;
6122 buffer
= tr
->trace_buffer
.buffer
;
6123 event
= __trace_buffer_lock_reserve(buffer
, TRACE_PRINT
, size
,
6124 irq_flags
, preempt_count());
6125 if (unlikely(!event
))
6126 /* Ring buffer disabled, return as if not open for write */
6129 entry
= ring_buffer_event_data(event
);
6130 entry
->ip
= _THIS_IP_
;
6132 len
= __copy_from_user_inatomic(&entry
->buf
, ubuf
, cnt
);
6134 memcpy(&entry
->buf
, faulted
, FAULTED_SIZE
);
6141 if (tr
->trace_marker_file
&& !list_empty(&tr
->trace_marker_file
->triggers
)) {
6142 /* do not add \n before testing triggers, but add \0 */
6143 entry
->buf
[cnt
] = '\0';
6144 tt
= event_triggers_call(tr
->trace_marker_file
, entry
, event
);
6147 if (entry
->buf
[cnt
- 1] != '\n') {
6148 entry
->buf
[cnt
] = '\n';
6149 entry
->buf
[cnt
+ 1] = '\0';
6151 entry
->buf
[cnt
] = '\0';
6153 __buffer_unlock_commit(buffer
, event
);
6156 event_triggers_post_call(tr
->trace_marker_file
, tt
);
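/*
 * Illustrative sketch (not part of this file): tracing_mark_write() above
 * implements the "trace_marker" file, which lets userspace drop its own
 * annotations into the ring buffer.  Assumes tracefs at /sys/kernel/tracing.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static void mark(int fd, const char *msg)
 *	{
 *		write(fd, msg, strlen(msg));	// shows up as a print event
 *	}
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_marker", O_WRONLY);
 *
 *		if (fd < 0)
 *			return 1;
 *		mark(fd, "benchmark start");
 *		// ... run the workload being traced ...
 *		mark(fd, "benchmark end");
 *		close(fd);
 *		return 0;
 *	}
 */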
6164 /* Limit it for now to 3K (including tag) */
6165 #define RAW_DATA_MAX_SIZE (1024*3)
6168 tracing_mark_raw_write(struct file
*filp
, const char __user
*ubuf
,
6169 size_t cnt
, loff_t
*fpos
)
6171 struct trace_array
*tr
= filp
->private_data
;
6172 struct ring_buffer_event
*event
;
6173 struct ring_buffer
*buffer
;
6174 struct raw_data_entry
*entry
;
6175 const char faulted
[] = "<faulted>";
6176 unsigned long irq_flags
;
6181 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6183 if (tracing_disabled
)
6186 if (!(tr
->trace_flags
& TRACE_ITER_MARKERS
))
6189 /* The marker must at least have a tag id */
6190 if (cnt
< sizeof(unsigned int) || cnt
> RAW_DATA_MAX_SIZE
)
6193 if (cnt
> TRACE_BUF_SIZE
)
6194 cnt
= TRACE_BUF_SIZE
;
6196 BUILD_BUG_ON(TRACE_BUF_SIZE
>= PAGE_SIZE
);
6198 local_save_flags(irq_flags
);
6199 size
= sizeof(*entry
) + cnt
;
6200 if (cnt
< FAULT_SIZE_ID
)
6201 size
+= FAULT_SIZE_ID
- cnt
;
6203 buffer
= tr
->trace_buffer
.buffer
;
6204 event
= __trace_buffer_lock_reserve(buffer
, TRACE_RAW_DATA
, size
,
6205 irq_flags
, preempt_count());
6207 /* Ring buffer disabled, return as if not open for write */
6210 entry
= ring_buffer_event_data(event
);
6212 len
= __copy_from_user_inatomic(&entry
->id
, ubuf
, cnt
);
6215 memcpy(&entry
->buf
, faulted
, FAULTED_SIZE
);
6220 __buffer_unlock_commit(buffer
, event
);
6228 static int tracing_clock_show(struct seq_file
*m
, void *v
)
6230 struct trace_array
*tr
= m
->private;
6233 for (i
= 0; i
< ARRAY_SIZE(trace_clocks
); i
++)
6235 "%s%s%s%s", i
? " " : "",
6236 i
== tr
->clock_id
? "[" : "", trace_clocks
[i
].name
,
6237 i
== tr
->clock_id
? "]" : "");
6243 int tracing_set_clock(struct trace_array
*tr
, const char *clockstr
)
6247 for (i
= 0; i
< ARRAY_SIZE(trace_clocks
); i
++) {
6248 if (strcmp(trace_clocks
[i
].name
, clockstr
) == 0)
6251 if (i
== ARRAY_SIZE(trace_clocks
))
6254 mutex_lock(&trace_types_lock
);
6258 ring_buffer_set_clock(tr
->trace_buffer
.buffer
, trace_clocks
[i
].func
);
6261 * New clock may not be consistent with the previous clock.
6262 * Reset the buffer so that it doesn't have incomparable timestamps.
6264 tracing_reset_online_cpus(&tr
->trace_buffer
);
6266 #ifdef CONFIG_TRACER_MAX_TRACE
6267 if (tr
->max_buffer
.buffer
)
6268 ring_buffer_set_clock(tr
->max_buffer
.buffer
, trace_clocks
[i
].func
);
6269 tracing_reset_online_cpus(&tr
->max_buffer
);
6272 mutex_unlock(&trace_types_lock
);
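/*
 * Illustrative sketch (not part of this file): tracing_set_clock() above is
 * reached when userspace writes a clock name into the "trace_clock" file.
 * Note the buffer reset described above: switching clocks discards the
 * trace data already recorded.  Assumes tracefs at /sys/kernel/tracing and
 * that the "mono" clock is available.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int use_monotonic_trace_clock(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/trace_clock", O_WRONLY);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, "mono", 4);
 *		close(fd);
 *		return ret < 0 ? -1 : 0;
 *	}
 */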
6277 static ssize_t
tracing_clock_write(struct file
*filp
, const char __user
*ubuf
,
6278 size_t cnt
, loff_t
*fpos
)
6280 struct seq_file
*m
= filp
->private_data
;
6281 struct trace_array
*tr
= m
->private;
6283 const char *clockstr
;
6286 if (cnt
>= sizeof(buf
))
6289 if (copy_from_user(buf
, ubuf
, cnt
))
6294 clockstr
= strstrip(buf
);
6296 ret
= tracing_set_clock(tr
, clockstr
);
6305 static int tracing_clock_open(struct inode
*inode
, struct file
*file
)
6307 struct trace_array
*tr
= inode
->i_private
;
6310 if (tracing_disabled
)
6313 if (trace_array_get(tr
))
6316 ret
= single_open(file
, tracing_clock_show
, inode
->i_private
);
6318 trace_array_put(tr
);
6323 static int tracing_time_stamp_mode_show(struct seq_file
*m
, void *v
)
6325 struct trace_array
*tr
= m
->private;
6327 mutex_lock(&trace_types_lock
);
6329 if (ring_buffer_time_stamp_abs(tr
->trace_buffer
.buffer
))
6330 seq_puts(m
, "delta [absolute]\n");
6332 seq_puts(m
, "[delta] absolute\n");
6334 mutex_unlock(&trace_types_lock
);
6339 static int tracing_time_stamp_mode_open(struct inode
*inode
, struct file
*file
)
6341 struct trace_array
*tr
= inode
->i_private
;
6344 if (tracing_disabled
)
6347 if (trace_array_get(tr
))
6350 ret
= single_open(file
, tracing_time_stamp_mode_show
, inode
->i_private
);
6352 trace_array_put(tr
);
6357 int tracing_set_time_stamp_abs(struct trace_array
*tr
, bool abs
)
6361 mutex_lock(&trace_types_lock
);
6363 if (abs
&& tr
->time_stamp_abs_ref
++)
6367 if (WARN_ON_ONCE(!tr
->time_stamp_abs_ref
)) {
6372 if (--tr
->time_stamp_abs_ref
)
6376 ring_buffer_set_time_stamp_abs(tr
->trace_buffer
.buffer
, abs
);
6378 #ifdef CONFIG_TRACER_MAX_TRACE
6379 if (tr
->max_buffer
.buffer
)
6380 ring_buffer_set_time_stamp_abs(tr
->max_buffer
.buffer
, abs
);
6383 mutex_unlock(&trace_types_lock
);
6388 struct ftrace_buffer_info
{
6389 struct trace_iterator iter
;
6391 unsigned int spare_cpu
;
6395 #ifdef CONFIG_TRACER_SNAPSHOT
6396 static int tracing_snapshot_open(struct inode
*inode
, struct file
*file
)
6398 struct trace_array
*tr
= inode
->i_private
;
6399 struct trace_iterator
*iter
;
6403 if (trace_array_get(tr
) < 0)
6406 if (file
->f_mode
& FMODE_READ
) {
6407 iter
= __tracing_open(inode
, file
, true);
6409 ret
= PTR_ERR(iter
);
6411 /* Writes still need the seq_file to hold the private data */
6413 m
= kzalloc(sizeof(*m
), GFP_KERNEL
);
6416 iter
= kzalloc(sizeof(*iter
), GFP_KERNEL
);
6424 iter
->trace_buffer
= &tr
->max_buffer
;
6425 iter
->cpu_file
= tracing_get_cpu(inode
);
6427 file
->private_data
= m
;
6431 trace_array_put(tr
);
6437 tracing_snapshot_write(struct file
*filp
, const char __user
*ubuf
, size_t cnt
,
6440 struct seq_file
*m
= filp
->private_data
;
6441 struct trace_iterator
*iter
= m
->private;
6442 struct trace_array
*tr
= iter
->tr
;
6446 ret
= tracing_update_buffers();
6450 ret
= kstrtoul_from_user(ubuf
, cnt
, 10, &val
);
6454 mutex_lock(&trace_types_lock
);
6456 if (tr
->current_trace
->use_max_tr
) {
6463 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
6467 if (tr
->allocated_snapshot
)
6471 /* Only allow per-cpu swap if the ring buffer supports it */
6472 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6473 if (iter
->cpu_file
!= RING_BUFFER_ALL_CPUS
) {
6478 if (!tr
->allocated_snapshot
) {
6479 ret
= tracing_alloc_snapshot_instance(tr
);
6483 local_irq_disable();
6484 /* Now, we're going to swap */
6485 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
6486 update_max_tr(tr
, current
, smp_processor_id());
6488 update_max_tr_single(tr
, current
, iter
->cpu_file
);
6492 if (tr
->allocated_snapshot
) {
6493 if (iter
->cpu_file
== RING_BUFFER_ALL_CPUS
)
6494 tracing_reset_online_cpus(&tr
->max_buffer
);
6496 tracing_reset(&tr
->max_buffer
, iter
->cpu_file
);
6506 mutex_unlock(&trace_types_lock
);
6510 static int tracing_snapshot_release(struct inode
*inode
, struct file
*file
)
6512 struct seq_file
*m
= file
->private_data
;
6515 ret
= tracing_release(inode
, file
);
6517 if (file
->f_mode
& FMODE_READ
)
6520 /* If write only, the seq_file is just a stub */
6528 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
);
6529 static ssize_t
tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
6530 size_t count
, loff_t
*ppos
);
6531 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
);
6532 static ssize_t
tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
6533 struct pipe_inode_info
*pipe
, size_t len
, unsigned int flags
);
6535 static int snapshot_raw_open(struct inode
*inode
, struct file
*filp
)
6537 struct ftrace_buffer_info
*info
;
6540 ret
= tracing_buffers_open(inode
, filp
);
6544 info
= filp
->private_data
;
6546 if (info
->iter
.trace
->use_max_tr
) {
6547 tracing_buffers_release(inode
, filp
);
6551 info
->iter
.snapshot
= true;
6552 info
->iter
.trace_buffer
= &info
->iter
.tr
->max_buffer
;
6557 #endif /* CONFIG_TRACER_SNAPSHOT */
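/*
 * Illustrative sketch (not part of this file): the snapshot file handled by
 * tracing_snapshot_open()/tracing_snapshot_write() above can be driven from
 * userspace; writing "1" swaps the live buffer into the snapshot buffer,
 * which can then be read at leisure.  Assumes tracefs at /sys/kernel/tracing
 * and CONFIG_TRACER_SNAPSHOT=y.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int take_snapshot(void)
 *	{
 *		int fd = open("/sys/kernel/tracing/snapshot", O_WRONLY);
 *		ssize_t ret;
 *
 *		if (fd < 0)
 *			return -1;
 *		ret = write(fd, "1", 1);	// then read "snapshot" for the data
 *		close(fd);
 *		return ret < 0 ? -1 : 0;
 *	}
 */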
static const struct file_operations tracing_thresh_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_thresh_read,
	.write		= tracing_thresh_write,
	.llseek		= generic_file_llseek,
};

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
static const struct file_operations tracing_max_lat_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_max_lat_read,
	.write		= tracing_max_lat_write,
	.llseek		= generic_file_llseek,
};
#endif

static const struct file_operations set_tracer_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_set_trace_read,
	.write		= tracing_set_trace_write,
	.llseek		= generic_file_llseek,
};

static const struct file_operations tracing_pipe_fops = {
	.open		= tracing_open_pipe,
	.poll		= tracing_poll_pipe,
	.read		= tracing_read_pipe,
	.splice_read	= tracing_splice_read_pipe,
	.release	= tracing_release_pipe,
	.llseek		= no_llseek,
};

static const struct file_operations tracing_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_entries_read,
	.write		= tracing_entries_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_total_entries_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_total_entries_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
static const struct file_operations tracing_free_buffer_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_free_buffer_write,
	.release	= tracing_free_buffer_release,
};

static const struct file_operations tracing_mark_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations tracing_mark_raw_fops = {
	.open		= tracing_open_generic_tr,
	.write		= tracing_mark_raw_write,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};

static const struct file_operations trace_clock_fops = {
	.open		= tracing_clock_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
	.write		= tracing_clock_write,
};

static const struct file_operations trace_time_stamp_mode_fops = {
	.open		= tracing_time_stamp_mode_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= tracing_single_release_tr,
};

#ifdef CONFIG_TRACER_SNAPSHOT
static const struct file_operations snapshot_fops = {
	.open		= tracing_snapshot_open,
	.read		= seq_read,
	.write		= tracing_snapshot_write,
	.llseek		= tracing_lseek,
	.release	= tracing_snapshot_release,
};

static const struct file_operations snapshot_raw_fops = {
	.open		= snapshot_raw_open,
	.read		= tracing_buffers_read,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};

#endif /* CONFIG_TRACER_SNAPSHOT */
6661 static int tracing_buffers_open(struct inode
*inode
, struct file
*filp
)
6663 struct trace_array
*tr
= inode
->i_private
;
6664 struct ftrace_buffer_info
*info
;
6667 if (tracing_disabled
)
6670 if (trace_array_get(tr
) < 0)
6673 info
= kzalloc(sizeof(*info
), GFP_KERNEL
);
6675 trace_array_put(tr
);
6679 mutex_lock(&trace_types_lock
);
6682 info
->iter
.cpu_file
= tracing_get_cpu(inode
);
6683 info
->iter
.trace
= tr
->current_trace
;
6684 info
->iter
.trace_buffer
= &tr
->trace_buffer
;
6686 /* Force reading ring buffer for first read */
6687 info
->read
= (unsigned int)-1;
6689 filp
->private_data
= info
;
6691 tr
->current_trace
->ref
++;
6693 mutex_unlock(&trace_types_lock
);
6695 ret
= nonseekable_open(inode
, filp
);
6697 trace_array_put(tr
);
6703 tracing_buffers_poll(struct file
*filp
, poll_table
*poll_table
)
6705 struct ftrace_buffer_info
*info
= filp
->private_data
;
6706 struct trace_iterator
*iter
= &info
->iter
;
6708 return trace_poll(iter
, filp
, poll_table
);
6712 tracing_buffers_read(struct file
*filp
, char __user
*ubuf
,
6713 size_t count
, loff_t
*ppos
)
6715 struct ftrace_buffer_info
*info
= filp
->private_data
;
6716 struct trace_iterator
*iter
= &info
->iter
;
6723 #ifdef CONFIG_TRACER_MAX_TRACE
6724 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
)
6729 info
->spare
= ring_buffer_alloc_read_page(iter
->trace_buffer
->buffer
,
6731 if (IS_ERR(info
->spare
)) {
6732 ret
= PTR_ERR(info
->spare
);
6735 info
->spare_cpu
= iter
->cpu_file
;
6741 /* Do we have previous read data to read? */
6742 if (info
->read
< PAGE_SIZE
)
6746 trace_access_lock(iter
->cpu_file
);
6747 ret
= ring_buffer_read_page(iter
->trace_buffer
->buffer
,
6751 trace_access_unlock(iter
->cpu_file
);
6754 if (trace_empty(iter
)) {
6755 if ((filp
->f_flags
& O_NONBLOCK
))
6758 ret
= wait_on_pipe(iter
, 0);
6769 size
= PAGE_SIZE
- info
->read
;
6773 ret
= copy_to_user(ubuf
, info
->spare
+ info
->read
, size
);
6785 static int tracing_buffers_release(struct inode
*inode
, struct file
*file
)
6787 struct ftrace_buffer_info
*info
= file
->private_data
;
6788 struct trace_iterator
*iter
= &info
->iter
;
6790 mutex_lock(&trace_types_lock
);
6792 iter
->tr
->current_trace
->ref
--;
6794 __trace_array_put(iter
->tr
);
6797 ring_buffer_free_read_page(iter
->trace_buffer
->buffer
,
6798 info
->spare_cpu
, info
->spare
);
6801 mutex_unlock(&trace_types_lock
);
6807 struct ring_buffer
*buffer
;
6813 static void buffer_pipe_buf_release(struct pipe_inode_info
*pipe
,
6814 struct pipe_buffer
*buf
)
6816 struct buffer_ref
*ref
= (struct buffer_ref
*)buf
->private;
6821 ring_buffer_free_read_page(ref
->buffer
, ref
->cpu
, ref
->page
);
6826 static void buffer_pipe_buf_get(struct pipe_inode_info
*pipe
,
6827 struct pipe_buffer
*buf
)
6829 struct buffer_ref
*ref
= (struct buffer_ref
*)buf
->private;
6834 /* Pipe buffer operations for a buffer. */
6835 static const struct pipe_buf_operations buffer_pipe_buf_ops
= {
6837 .confirm
= generic_pipe_buf_confirm
,
6838 .release
= buffer_pipe_buf_release
,
6839 .steal
= generic_pipe_buf_steal
,
6840 .get
= buffer_pipe_buf_get
,
6844 * Callback from splice_to_pipe(), if we need to release some pages
6845 * at the end of the spd in case we error'ed out in filling the pipe.
6847 static void buffer_spd_release(struct splice_pipe_desc
*spd
, unsigned int i
)
6849 struct buffer_ref
*ref
=
6850 (struct buffer_ref
*)spd
->partial
[i
].private;
6855 ring_buffer_free_read_page(ref
->buffer
, ref
->cpu
, ref
->page
);
6857 spd
->partial
[i
].private = 0;
6861 tracing_buffers_splice_read(struct file
*file
, loff_t
*ppos
,
6862 struct pipe_inode_info
*pipe
, size_t len
,
6865 struct ftrace_buffer_info
*info
= file
->private_data
;
6866 struct trace_iterator
*iter
= &info
->iter
;
6867 struct partial_page partial_def
[PIPE_DEF_BUFFERS
];
6868 struct page
*pages_def
[PIPE_DEF_BUFFERS
];
6869 struct splice_pipe_desc spd
= {
6871 .partial
= partial_def
,
6872 .nr_pages_max
= PIPE_DEF_BUFFERS
,
6873 .ops
= &buffer_pipe_buf_ops
,
6874 .spd_release
= buffer_spd_release
,
6876 struct buffer_ref
*ref
;
6880 #ifdef CONFIG_TRACER_MAX_TRACE
6881 if (iter
->snapshot
&& iter
->tr
->current_trace
->use_max_tr
)
6885 if (*ppos
& (PAGE_SIZE
- 1))
6888 if (len
& (PAGE_SIZE
- 1)) {
6889 if (len
< PAGE_SIZE
)
6894 if (splice_grow_spd(pipe
, &spd
))
6898 trace_access_lock(iter
->cpu_file
);
6899 entries
= ring_buffer_entries_cpu(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
6901 for (i
= 0; i
< spd
.nr_pages_max
&& len
&& entries
; i
++, len
-= PAGE_SIZE
) {
6905 ref
= kzalloc(sizeof(*ref
), GFP_KERNEL
);
6912 ref
->buffer
= iter
->trace_buffer
->buffer
;
6913 ref
->page
= ring_buffer_alloc_read_page(ref
->buffer
, iter
->cpu_file
);
6914 if (IS_ERR(ref
->page
)) {
6915 ret
= PTR_ERR(ref
->page
);
6920 ref
->cpu
= iter
->cpu_file
;
6922 r
= ring_buffer_read_page(ref
->buffer
, &ref
->page
,
6923 len
, iter
->cpu_file
, 1);
6925 ring_buffer_free_read_page(ref
->buffer
, ref
->cpu
,
6931 page
= virt_to_page(ref
->page
);
6933 spd
.pages
[i
] = page
;
6934 spd
.partial
[i
].len
= PAGE_SIZE
;
6935 spd
.partial
[i
].offset
= 0;
6936 spd
.partial
[i
].private = (unsigned long)ref
;
6940 entries
= ring_buffer_entries_cpu(iter
->trace_buffer
->buffer
, iter
->cpu_file
);
6943 trace_access_unlock(iter
->cpu_file
);
6946 /* did we read anything? */
6947 if (!spd
.nr_pages
) {
6952 if ((file
->f_flags
& O_NONBLOCK
) || (flags
& SPLICE_F_NONBLOCK
))
6955 ret
= wait_on_pipe(iter
, iter
->tr
->buffer_percent
);
6962 ret
= splice_to_pipe(pipe
, &spd
);
6964 splice_shrink_spd(&spd
);
static const struct file_operations tracing_buffers_fops = {
	.open		= tracing_buffers_open,
	.read		= tracing_buffers_read,
	.poll		= tracing_buffers_poll,
	.release	= tracing_buffers_release,
	.splice_read	= tracing_buffers_splice_read,
	.llseek		= no_llseek,
};
6979 tracing_stats_read(struct file
*filp
, char __user
*ubuf
,
6980 size_t count
, loff_t
*ppos
)
6982 struct inode
*inode
= file_inode(filp
);
6983 struct trace_array
*tr
= inode
->i_private
;
6984 struct trace_buffer
*trace_buf
= &tr
->trace_buffer
;
6985 int cpu
= tracing_get_cpu(inode
);
6986 struct trace_seq
*s
;
6988 unsigned long long t
;
6989 unsigned long usec_rem
;
6991 s
= kmalloc(sizeof(*s
), GFP_KERNEL
);
6997 cnt
= ring_buffer_entries_cpu(trace_buf
->buffer
, cpu
);
6998 trace_seq_printf(s
, "entries: %ld\n", cnt
);
7000 cnt
= ring_buffer_overrun_cpu(trace_buf
->buffer
, cpu
);
7001 trace_seq_printf(s
, "overrun: %ld\n", cnt
);
7003 cnt
= ring_buffer_commit_overrun_cpu(trace_buf
->buffer
, cpu
);
7004 trace_seq_printf(s
, "commit overrun: %ld\n", cnt
);
7006 cnt
= ring_buffer_bytes_cpu(trace_buf
->buffer
, cpu
);
7007 trace_seq_printf(s
, "bytes: %ld\n", cnt
);
7009 if (trace_clocks
[tr
->clock_id
].in_ns
) {
7010 /* local or global for trace_clock */
7011 t
= ns2usecs(ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
7012 usec_rem
= do_div(t
, USEC_PER_SEC
);
7013 trace_seq_printf(s
, "oldest event ts: %5llu.%06lu\n",
7016 t
= ns2usecs(ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
7017 usec_rem
= do_div(t
, USEC_PER_SEC
);
7018 trace_seq_printf(s
, "now ts: %5llu.%06lu\n", t
, usec_rem
);
7020 /* counter or tsc mode for trace_clock */
7021 trace_seq_printf(s
, "oldest event ts: %llu\n",
7022 ring_buffer_oldest_event_ts(trace_buf
->buffer
, cpu
));
7024 trace_seq_printf(s
, "now ts: %llu\n",
7025 ring_buffer_time_stamp(trace_buf
->buffer
, cpu
));
7028 cnt
= ring_buffer_dropped_events_cpu(trace_buf
->buffer
, cpu
);
7029 trace_seq_printf(s
, "dropped events: %ld\n", cnt
);
7031 cnt
= ring_buffer_read_events_cpu(trace_buf
->buffer
, cpu
);
7032 trace_seq_printf(s
, "read events: %ld\n", cnt
);
7034 count
= simple_read_from_buffer(ubuf
, count
, ppos
,
7035 s
->buffer
, trace_seq_used(s
));
static const struct file_operations tracing_stats_fops = {
	.open		= tracing_open_generic_tr,
	.read		= tracing_stats_read,
	.llseek		= generic_file_llseek,
	.release	= tracing_release_generic_tr,
};
7049 #ifdef CONFIG_DYNAMIC_FTRACE
7052 tracing_read_dyn_info(struct file
*filp
, char __user
*ubuf
,
7053 size_t cnt
, loff_t
*ppos
)
7055 unsigned long *p
= filp
->private_data
;
7056 char buf
[64]; /* Not too big for a shallow stack */
7059 r
= scnprintf(buf
, 63, "%ld", *p
);
7062 return simple_read_from_buffer(ubuf
, cnt
, ppos
, buf
, r
);
static const struct file_operations tracing_dyn_info_fops = {
	.open		= tracing_open_generic,
	.read		= tracing_read_dyn_info,
	.llseek		= generic_file_llseek,
};
#endif /* CONFIG_DYNAMIC_FTRACE */
7072 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7074 ftrace_snapshot(unsigned long ip
, unsigned long parent_ip
,
7075 struct trace_array
*tr
, struct ftrace_probe_ops
*ops
,
7078 tracing_snapshot_instance(tr
);
7082 ftrace_count_snapshot(unsigned long ip
, unsigned long parent_ip
,
7083 struct trace_array
*tr
, struct ftrace_probe_ops
*ops
,
7086 struct ftrace_func_mapper
*mapper
= data
;
7090 count
= (long *)ftrace_func_mapper_find_ip(mapper
, ip
);
7100 tracing_snapshot_instance(tr
);
7104 ftrace_snapshot_print(struct seq_file
*m
, unsigned long ip
,
7105 struct ftrace_probe_ops
*ops
, void *data
)
7107 struct ftrace_func_mapper
*mapper
= data
;
7110 seq_printf(m
, "%ps:", (void *)ip
);
7112 seq_puts(m
, "snapshot");
7115 count
= (long *)ftrace_func_mapper_find_ip(mapper
, ip
);
7118 seq_printf(m
, ":count=%ld\n", *count
);
7120 seq_puts(m
, ":unlimited\n");
7126 ftrace_snapshot_init(struct ftrace_probe_ops
*ops
, struct trace_array
*tr
,
7127 unsigned long ip
, void *init_data
, void **data
)
7129 struct ftrace_func_mapper
*mapper
= *data
;
7132 mapper
= allocate_ftrace_func_mapper();
7138 return ftrace_func_mapper_add_ip(mapper
, ip
, init_data
);
7142 ftrace_snapshot_free(struct ftrace_probe_ops
*ops
, struct trace_array
*tr
,
7143 unsigned long ip
, void *data
)
7145 struct ftrace_func_mapper
*mapper
= data
;
7150 free_ftrace_func_mapper(mapper
, NULL
);
7154 ftrace_func_mapper_remove_ip(mapper
, ip
);
static struct ftrace_probe_ops snapshot_probe_ops = {
	.func			= ftrace_snapshot,
	.print			= ftrace_snapshot_print,
};

static struct ftrace_probe_ops snapshot_count_probe_ops = {
	.func			= ftrace_count_snapshot,
	.print			= ftrace_snapshot_print,
	.init			= ftrace_snapshot_init,
	.free			= ftrace_snapshot_free,
};
7170 ftrace_trace_snapshot_callback(struct trace_array
*tr
, struct ftrace_hash
*hash
,
7171 char *glob
, char *cmd
, char *param
, int enable
)
7173 struct ftrace_probe_ops
*ops
;
7174 void *count
= (void *)-1;
7181 /* hash funcs only work with set_ftrace_filter */
7185 ops
= param
? &snapshot_count_probe_ops
: &snapshot_probe_ops
;
7188 return unregister_ftrace_function_probe_func(glob
+1, tr
, ops
);
7193 number
= strsep(¶m
, ":");
7195 if (!strlen(number
))
7199 * We use the callback data field (which is a pointer)
7202 ret
= kstrtoul(number
, 0, (unsigned long *)&count
);
7207 ret
= tracing_alloc_snapshot_instance(tr
);
7211 ret
= register_ftrace_function_probe(glob
, tr
, ops
, count
);
7214 return ret
< 0 ? ret
: 0;
7217 static struct ftrace_func_command ftrace_snapshot_cmd
= {
7219 .func
= ftrace_trace_snapshot_callback
,
7222 static __init
int register_snapshot_cmd(void)
7224 return register_ftrace_command(&ftrace_snapshot_cmd
);
7227 static inline __init
int register_snapshot_cmd(void) { return 0; }
7228 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7230 static struct dentry
*tracing_get_dentry(struct trace_array
*tr
)
7232 if (WARN_ON(!tr
->dir
))
7233 return ERR_PTR(-ENODEV
);
7235 /* Top directory uses NULL as the parent */
7236 if (tr
->flags
& TRACE_ARRAY_FL_GLOBAL
)
7239 /* All sub buffers have a descriptor */
7243 static struct dentry
*tracing_dentry_percpu(struct trace_array
*tr
, int cpu
)
7245 struct dentry
*d_tracer
;
7248 return tr
->percpu_dir
;
7250 d_tracer
= tracing_get_dentry(tr
);
7251 if (IS_ERR(d_tracer
))
7254 tr
->percpu_dir
= tracefs_create_dir("per_cpu", d_tracer
);
7256 WARN_ONCE(!tr
->percpu_dir
,
7257 "Could not create tracefs directory 'per_cpu/%d'\n", cpu
);
7259 return tr
->percpu_dir
;
7262 static struct dentry
*
7263 trace_create_cpu_file(const char *name
, umode_t mode
, struct dentry
*parent
,
7264 void *data
, long cpu
, const struct file_operations
*fops
)
7266 struct dentry
*ret
= trace_create_file(name
, mode
, parent
, data
, fops
);
7268 if (ret
) /* See tracing_get_cpu() */
7269 d_inode(ret
)->i_cdev
= (void *)(cpu
+ 1);
7274 tracing_init_tracefs_percpu(struct trace_array
*tr
, long cpu
)
7276 struct dentry
*d_percpu
= tracing_dentry_percpu(tr
, cpu
);
7277 struct dentry
*d_cpu
;
7278 char cpu_dir
[30]; /* 30 characters should be more than enough */
7283 snprintf(cpu_dir
, 30, "cpu%ld", cpu
);
7284 d_cpu
= tracefs_create_dir(cpu_dir
, d_percpu
);
7286 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir
);
7290 /* per cpu trace_pipe */
7291 trace_create_cpu_file("trace_pipe", 0444, d_cpu
,
7292 tr
, cpu
, &tracing_pipe_fops
);
7295 trace_create_cpu_file("trace", 0644, d_cpu
,
7296 tr
, cpu
, &tracing_fops
);
7298 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu
,
7299 tr
, cpu
, &tracing_buffers_fops
);
7301 trace_create_cpu_file("stats", 0444, d_cpu
,
7302 tr
, cpu
, &tracing_stats_fops
);
7304 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu
,
7305 tr
, cpu
, &tracing_entries_fops
);
7307 #ifdef CONFIG_TRACER_SNAPSHOT
7308 trace_create_cpu_file("snapshot", 0644, d_cpu
,
7309 tr
, cpu
, &snapshot_fops
);
7311 trace_create_cpu_file("snapshot_raw", 0444, d_cpu
,
7312 tr
, cpu
, &snapshot_raw_fops
);
7316 #ifdef CONFIG_FTRACE_SELFTEST
7317 /* Let selftest have access to static functions in this file */
7318 #include "trace_selftest.c"
static ssize_t
trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	char *buf;

	if (topt->flags->val & topt->opt->bit)
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	struct trace_option_dentry *topt = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	if (!!(topt->flags->val & topt->opt->bit) != val) {
		mutex_lock(&trace_types_lock);
		ret = __set_tracer_option(topt->tr, topt->flags,
					  topt->opt, !val);
		mutex_unlock(&trace_types_lock);
		if (ret)
			return ret;
	}

	*ppos += cnt;

	return cnt;
}
static const struct file_operations trace_options_fops = {
	.open = tracing_open_generic,
	.read = trace_options_read,
	.write = trace_options_write,
	.llseek	= generic_file_llseek,
};
/*
 * In order to pass in both the trace_array descriptor as well as the index
 * to the flag that the trace option file represents, the trace_array
 * has a character array of trace_flags_index[], which holds the index
 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
 * The address of this character array is passed to the flag option file
 * read/write callbacks.
 *
 * In order to extract both the index and the trace_array descriptor,
 * get_tr_index() uses the following algorithm.
 *
 *	idx = *ptr;
 *
 * As the pointer itself contains the address of the index (remember
 * index[0] == 0).
 *
 * Then to get the trace_array descriptor, by subtracting that index
 * from the ptr, we get to the start of the index itself.
 *
 *	ptr - idx == &index[0]
 *
 * Then a simple container_of() from that pointer gets us to the
 * trace_array descriptor.
 */
static void get_tr_index(void *data, struct trace_array **ptr,
			 unsigned int *pindex)
{
	*pindex = *(unsigned char *)data;

	*ptr = container_of(data - *pindex, struct trace_array,
			    trace_flags_index);
}
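/*
 * Worked example of the arithmetic above: if data points at
 * tr->trace_flags_index[5], then *(unsigned char *)data == 5 (the array is
 * filled with index[i] = i), so data - 5 == &tr->trace_flags_index[0], and
 * container_of() on that address recovers the enclosing trace_array.
 */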
static ssize_t
trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
			loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	char *buf;

	get_tr_index(tr_index, &tr, &index);

	if (tr->trace_flags & (1 << index))
		buf = "1\n";
	else
		buf = "0\n";

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
}
static ssize_t
trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
			 loff_t *ppos)
{
	void *tr_index = filp->private_data;
	struct trace_array *tr;
	unsigned int index;
	unsigned long val;
	int ret;

	get_tr_index(tr_index, &tr, &index);

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val != 0 && val != 1)
		return -EINVAL;

	mutex_lock(&trace_types_lock);
	ret = set_tracer_flag(tr, 1 << index, val);
	mutex_unlock(&trace_types_lock);

	if (ret < 0)
		return ret;

	*ppos += cnt;

	return cnt;
}
static const struct file_operations trace_options_core_fops = {
	.open = tracing_open_generic,
	.read = trace_options_core_read,
	.write = trace_options_core_write,
	.llseek = generic_file_llseek,
};
struct dentry *trace_create_file(const char *name,
				 umode_t mode,
				 struct dentry *parent,
				 void *data,
				 const struct file_operations *fops)
{
	struct dentry *ret;

	ret = tracefs_create_file(name, mode, parent, data, fops);
	if (!ret)
		pr_warn("Could not create tracefs '%s' entry\n", name);

	return ret;
}
static struct dentry *trace_options_init_dentry(struct trace_array *tr)
{
	struct dentry *d_tracer;

	if (tr->options)
		return tr->options;

	d_tracer = tracing_get_dentry(tr);
	if (IS_ERR(d_tracer))
		return NULL;

	tr->options = tracefs_create_dir("options", d_tracer);
	if (!tr->options) {
		pr_warn("Could not create tracefs directory 'options'\n");
		return NULL;
	}

	return tr->options;
}
static void
create_trace_option_file(struct trace_array *tr,
			 struct trace_option_dentry *topt,
			 struct tracer_flags *flags,
			 struct tracer_opt *opt)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	topt->flags = flags;
	topt->opt = opt;
	topt->tr = tr;

	topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
					&trace_options_fops);
}
static void
create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
{
	struct trace_option_dentry *topts;
	struct trace_options *tr_topts;
	struct tracer_flags *flags;
	struct tracer_opt *opts;
	int cnt;
	int i;

	if (!tracer)
		return;

	flags = tracer->flags;

	if (!flags || !flags->opts)
		return;

	/*
	 * If this is an instance, only create flags for tracers
	 * the instance may have.
	 */
	if (!trace_ok_for_array(tracer, tr))
		return;

	for (i = 0; i < tr->nr_topts; i++) {
		/* Make sure there's no duplicate flags. */
		if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
			return;
	}

	opts = flags->opts;

	for (cnt = 0; opts[cnt].name; cnt++)
		;

	topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
	if (!topts)
		return;

	tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
			    GFP_KERNEL);
	if (!tr_topts) {
		kfree(topts);
		return;
	}

	tr->topts = tr_topts;
	tr->topts[tr->nr_topts].tracer = tracer;
	tr->topts[tr->nr_topts].topts = topts;
	tr->nr_topts++;

	for (cnt = 0; opts[cnt].name; cnt++) {
		create_trace_option_file(tr, &topts[cnt], flags,
					 &opts[cnt]);
		WARN_ONCE(topts[cnt].entry == NULL,
			  "Failed to create trace option: %s",
			  opts[cnt].name);
	}
}
static struct dentry *
create_trace_option_core_file(struct trace_array *tr,
			      const char *option, long index)
{
	struct dentry *t_options;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return NULL;

	return trace_create_file(option, 0644, t_options,
				 (void *)&tr->trace_flags_index[index],
				 &trace_options_core_fops);
}
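/*
 * Each call to the helper above creates one file under the instance's
 * "options" directory, e.g. /sys/kernel/tracing/options/sym-offset. Reading
 * it returns "0\n" or "1\n", and writing the same values toggles the
 * corresponding bit in tr->trace_flags through trace_options_core_fops.
 */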
static void create_trace_options_dir(struct trace_array *tr)
{
	struct dentry *t_options;
	bool top_level = tr == &global_trace;
	int i;

	t_options = trace_options_init_dentry(tr);
	if (!t_options)
		return;

	for (i = 0; trace_options[i]; i++) {
		if (top_level ||
		    !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
			create_trace_option_core_file(tr, trace_options[i], i);
	}
}
static ssize_t
rb_simple_read(struct file *filp, char __user *ubuf,
	       size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tracer_tracing_is_on(tr);
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
rb_simple_write(struct file *filp, const char __user *ubuf,
		size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (buffer) {
		mutex_lock(&trace_types_lock);
		if (!!val == tracer_tracing_is_on(tr)) {
			val = 0; /* do nothing */
		} else if (val) {
			tracer_tracing_on(tr);
			if (tr->current_trace->start)
				tr->current_trace->start(tr);
		} else {
			tracer_tracing_off(tr);
			if (tr->current_trace->stop)
				tr->current_trace->stop(tr);
		}
		mutex_unlock(&trace_types_lock);
	}

	(*ppos)++;

	return cnt;
}
static const struct file_operations rb_simple_fops = {
	.open = tracing_open_generic_tr,
	.read = rb_simple_read,
	.write = rb_simple_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};
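/*
 * rb_simple_fops backs the per-instance "tracing_on" file (created in
 * init_tracer_tracefs() below). Writing 0 or 1 there pauses or resumes
 * recording into the ring buffer without tearing down the current tracer:
 *
 *	echo 0 > /sys/kernel/tracing/tracing_on
 *	echo 1 > /sys/kernel/tracing/tracing_on
 */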
static ssize_t
buffer_percent_read(struct file *filp, char __user *ubuf,
		    size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	char buf[64];
	int r;

	r = tr->buffer_percent;
	r = sprintf(buf, "%d\n", r);

	return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
}
static ssize_t
buffer_percent_write(struct file *filp, const char __user *ubuf,
		     size_t cnt, loff_t *ppos)
{
	struct trace_array *tr = filp->private_data;
	unsigned long val;
	int ret;

	ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
	if (ret)
		return ret;

	if (val > 100)
		return -EINVAL;

	if (!val)
		val = 1;

	tr->buffer_percent = val;

	(*ppos)++;

	return cnt;
}
static const struct file_operations buffer_percent_fops = {
	.open = tracing_open_generic_tr,
	.read = buffer_percent_read,
	.write = buffer_percent_write,
	.release = tracing_release_generic_tr,
	.llseek = default_llseek,
};
struct dentry *trace_instance_dir;

static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
static int
allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
{
	enum ring_buffer_flags rb_flags;

	rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

	buf->tr = tr;

	buf->buffer = ring_buffer_alloc(size, rb_flags);
	if (!buf->buffer)
		return -ENOMEM;

	buf->data = alloc_percpu(struct trace_array_cpu);
	if (!buf->data) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		return -ENOMEM;
	}

	/* Allocate the first page for all buffers */
	set_buffer_entries(&tr->trace_buffer,
			   ring_buffer_size(tr->trace_buffer.buffer, 0));

	return 0;
}
static int allocate_trace_buffers(struct trace_array *tr, int size)
{
	int ret;

	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
	if (ret)
		return ret;

#ifdef CONFIG_TRACER_MAX_TRACE
	ret = allocate_trace_buffer(tr, &tr->max_buffer,
				    allocate_snapshot ? size : 1);
	if (WARN_ON(ret)) {
		ring_buffer_free(tr->trace_buffer.buffer);
		tr->trace_buffer.buffer = NULL;
		free_percpu(tr->trace_buffer.data);
		tr->trace_buffer.data = NULL;
		return -ENOMEM;
	}
	tr->allocated_snapshot = allocate_snapshot;

	/*
	 * Only the top level trace array gets its snapshot allocated
	 * from the kernel command line.
	 */
	allocate_snapshot = false;
#endif
	return 0;
}
static void free_trace_buffer(struct trace_buffer *buf)
{
	if (buf->buffer) {
		ring_buffer_free(buf->buffer);
		buf->buffer = NULL;
		free_percpu(buf->data);
		buf->data = NULL;
	}
}
static void free_trace_buffers(struct trace_array *tr)
{
	if (!tr)
		return;

	free_trace_buffer(&tr->trace_buffer);

#ifdef CONFIG_TRACER_MAX_TRACE
	free_trace_buffer(&tr->max_buffer);
#endif
}
static void init_trace_flags_index(struct trace_array *tr)
{
	int i;

	/* Used by the trace options files */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
		tr->trace_flags_index[i] = i;
}
static void __update_tracer_options(struct trace_array *tr)
{
	struct tracer *t;

	for (t = trace_types; t; t = t->next)
		add_tracer_options(tr, t);
}
static void update_tracer_options(struct trace_array *tr)
{
	mutex_lock(&trace_types_lock);
	__update_tracer_options(tr);
	mutex_unlock(&trace_types_lock);
}
static int instance_mkdir(const char *name)
{
	struct trace_array *tr;
	int ret;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -EEXIST;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0)
			goto out_unlock;
	}

	ret = -ENOMEM;
	tr = kzalloc(sizeof(*tr), GFP_KERNEL);
	if (!tr)
		goto out_unlock;

	tr->name = kstrdup(name, GFP_KERNEL);
	if (!tr->name)
		goto out_free_tr;

	if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
		goto out_free_tr;

	tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;

	cpumask_copy(tr->tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&tr->start_lock);

	tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	tr->current_trace = &nop_trace;

	INIT_LIST_HEAD(&tr->systems);
	INIT_LIST_HEAD(&tr->events);
	INIT_LIST_HEAD(&tr->hist_vars);

	if (allocate_trace_buffers(tr, trace_buf_size) < 0)
		goto out_free_tr;

	tr->dir = tracefs_create_dir(name, trace_instance_dir);
	if (!tr->dir)
		goto out_free_tr;

	ret = event_trace_add_tracer(tr->dir, tr);
	if (ret) {
		tracefs_remove_recursive(tr->dir);
		goto out_free_tr;
	}

	ftrace_init_trace_array(tr);

	init_tracer_tracefs(tr, tr->dir);
	init_trace_flags_index(tr);
	__update_tracer_options(tr);

	list_add(&tr->list, &ftrace_trace_arrays);

	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return 0;

 out_free_tr:
	free_trace_buffers(tr);
	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
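/*
 * instance_mkdir() is invoked by tracefs when a directory is created under
 * the "instances" directory, so a new, independent trace_array (with its own
 * buffers, events and options) can be created entirely from user space:
 *
 *	mkdir /sys/kernel/tracing/instances/foo
 *	echo function > /sys/kernel/tracing/instances/foo/current_tracer
 */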
static int instance_rmdir(const char *name)
{
	struct trace_array *tr;
	int found = 0;
	int ret;
	int i;

	mutex_lock(&event_mutex);
	mutex_lock(&trace_types_lock);

	ret = -ENODEV;
	list_for_each_entry(tr, &ftrace_trace_arrays, list) {
		if (tr->name && strcmp(tr->name, name) == 0) {
			found = 1;
			break;
		}
	}
	if (!found)
		goto out_unlock;

	ret = -EBUSY;
	if (tr->ref || (tr->current_trace && tr->current_trace->ref))
		goto out_unlock;

	list_del(&tr->list);

	/* Disable all the flags that were enabled coming in */
	for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
		if ((1 << i) & ZEROED_TRACE_FLAGS)
			set_tracer_flag(tr, 1 << i, 0);
	}

	tracing_set_nop(tr);
	clear_ftrace_function_probes(tr);
	event_trace_del_tracer(tr);
	ftrace_clear_pids(tr);
	ftrace_destroy_function_files(tr);
	tracefs_remove_recursive(tr->dir);
	free_trace_buffers(tr);

	for (i = 0; i < tr->nr_topts; i++) {
		kfree(tr->topts[i].topts);
	}
	kfree(tr->topts);

	free_cpumask_var(tr->tracing_cpumask);
	kfree(tr->name);
	kfree(tr);

	ret = 0;

 out_unlock:
	mutex_unlock(&trace_types_lock);
	mutex_unlock(&event_mutex);

	return ret;
}
static __init void create_trace_instances(struct dentry *d_tracer)
{
	trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
							 instance_mkdir,
							 instance_rmdir);
	if (WARN_ON(!trace_instance_dir))
		return;
}
static void
init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
{
	struct trace_event_file *file;
	int cpu;

	trace_create_file("available_tracers", 0444, d_tracer,
			tr, &show_traces_fops);

	trace_create_file("current_tracer", 0644, d_tracer,
			tr, &set_tracer_fops);

	trace_create_file("tracing_cpumask", 0644, d_tracer,
			  tr, &tracing_cpumask_fops);

	trace_create_file("trace_options", 0644, d_tracer,
			  tr, &tracing_iter_fops);

	trace_create_file("trace", 0644, d_tracer,
			  tr, &tracing_fops);

	trace_create_file("trace_pipe", 0444, d_tracer,
			  tr, &tracing_pipe_fops);

	trace_create_file("buffer_size_kb", 0644, d_tracer,
			  tr, &tracing_entries_fops);

	trace_create_file("buffer_total_size_kb", 0444, d_tracer,
			  tr, &tracing_total_entries_fops);

	trace_create_file("free_buffer", 0200, d_tracer,
			  tr, &tracing_free_buffer_fops);

	trace_create_file("trace_marker", 0220, d_tracer,
			  tr, &tracing_mark_fops);

	file = __find_event_file(tr, "ftrace", "print");
	if (file && file->dir)
		trace_create_file("trigger", 0644, file->dir, file,
				  &event_trigger_fops);
	tr->trace_marker_file = file;

	trace_create_file("trace_marker_raw", 0220, d_tracer,
			  tr, &tracing_mark_raw_fops);

	trace_create_file("trace_clock", 0644, d_tracer, tr,
			  &trace_clock_fops);

	trace_create_file("tracing_on", 0644, d_tracer,
			  tr, &rb_simple_fops);

	trace_create_file("timestamp_mode", 0444, d_tracer, tr,
			  &trace_time_stamp_mode_fops);

	tr->buffer_percent = 50;

	trace_create_file("buffer_percent", 0444, d_tracer,
			tr, &buffer_percent_fops);

	create_trace_options_dir(tr);

#if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
	trace_create_file("tracing_max_latency", 0644, d_tracer,
			&tr->max_latency, &tracing_max_lat_fops);
#endif

	if (ftrace_create_function_files(tr, d_tracer))
		WARN(1, "Could not allocate function filter files");

#ifdef CONFIG_TRACER_SNAPSHOT
	trace_create_file("snapshot", 0644, d_tracer,
			  tr, &snapshot_fops);
#endif

	for_each_tracing_cpu(cpu)
		tracing_init_tracefs_percpu(tr, cpu);

	ftrace_init_tracefs(tr, d_tracer);
}
static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
{
	struct vfsmount *mnt;
	struct file_system_type *type;

	/*
	 * To maintain backward compatibility for tools that mount
	 * debugfs to get to the tracing facility, tracefs is automatically
	 * mounted to the debugfs/tracing directory.
	 */
	type = get_fs_type("tracefs");
	if (!type)
		return NULL;
	mnt = vfs_submount(mntpt, type, "tracefs", NULL);
	put_filesystem(type);
	if (IS_ERR(mnt))
		return NULL;
	mntget(mnt);

	return mnt;
}
/**
 * tracing_init_dentry - initialize top level trace array
 *
 * This is called when creating files or directories in the tracing
 * directory. It is called via fs_initcall() by any of the boot up code
 * and expects to return the dentry of the top level tracing directory.
 */
struct dentry *tracing_init_dentry(void)
{
	struct trace_array *tr = &global_trace;

	/* The top level trace array uses NULL as parent */
	if (tr->dir)
		return NULL;

	if (WARN_ON(!tracefs_initialized()) ||
		(IS_ENABLED(CONFIG_DEBUG_FS) &&
		 WARN_ON(!debugfs_initialized())))
		return ERR_PTR(-ENODEV);

	/*
	 * As there may still be users that expect the tracing
	 * files to exist in debugfs/tracing, we must automount
	 * the tracefs file system there, so older tools still
	 * work with the newer kernel.
	 */
	tr->dir = debugfs_create_automount("tracing", NULL,
					   trace_automount, NULL);
	if (!tr->dir) {
		pr_warn_once("Could not create debugfs directory 'tracing'\n");
		return ERR_PTR(-ENOMEM);
	}

	return NULL;
}
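/*
 * Note that the debugfs "tracing" entry created above is only an automount
 * point: the first lookup under /sys/kernel/debug/tracing triggers
 * trace_automount(), which mounts tracefs there, so legacy paths keep
 * working alongside the canonical /sys/kernel/tracing mount.
 */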
extern struct trace_eval_map *__start_ftrace_eval_maps[];
extern struct trace_eval_map *__stop_ftrace_eval_maps[];

static void __init trace_eval_init(void)
{
	int len;

	len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
	trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
}
#ifdef CONFIG_MODULES
static void trace_module_add_evals(struct module *mod)
{
	if (!mod->num_trace_evals)
		return;

	/*
	 * Modules with bad taint do not have events created, do
	 * not bother with enums either.
	 */
	if (trace_module_has_bad_taint(mod))
		return;

	trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
}
#ifdef CONFIG_TRACE_EVAL_MAP_FILE
static void trace_module_remove_evals(struct module *mod)
{
	union trace_eval_map_item *map;
	union trace_eval_map_item **last = &trace_eval_maps;

	if (!mod->num_trace_evals)
		return;

	mutex_lock(&trace_eval_mutex);

	map = trace_eval_maps;

	while (map) {
		if (map->head.mod == mod)
			break;
		map = trace_eval_jmp_to_tail(map);
		last = &map->tail.next;
		map = map->tail.next;
	}
	if (!map)
		goto out;

	*last = trace_eval_jmp_to_tail(map)->tail.next;
	kfree(map);
 out:
	mutex_unlock(&trace_eval_mutex);
}
#else
static inline void trace_module_remove_evals(struct module *mod) { }
#endif /* CONFIG_TRACE_EVAL_MAP_FILE */
static int trace_module_notify(struct notifier_block *self,
			       unsigned long val, void *data)
{
	struct module *mod = data;

	switch (val) {
	case MODULE_STATE_COMING:
		trace_module_add_evals(mod);
		break;
	case MODULE_STATE_GOING:
		trace_module_remove_evals(mod);
		break;
	}

	return 0;
}

static struct notifier_block trace_module_nb = {
	.notifier_call = trace_module_notify,
	.priority = 0,
};
#endif /* CONFIG_MODULES */
static __init int tracer_init_tracefs(void)
{
	struct dentry *d_tracer;

	trace_access_lock_init();

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	init_tracer_tracefs(&global_trace, d_tracer);
	ftrace_init_tracefs_toplevel(&global_trace, d_tracer);

	trace_create_file("tracing_thresh", 0644, d_tracer,
			&global_trace, &tracing_thresh_fops);

	trace_create_file("README", 0444, d_tracer,
			NULL, &tracing_readme_fops);

	trace_create_file("saved_cmdlines", 0444, d_tracer,
			NULL, &tracing_saved_cmdlines_fops);

	trace_create_file("saved_cmdlines_size", 0644, d_tracer,
			  NULL, &tracing_saved_cmdlines_size_fops);

	trace_create_file("saved_tgids", 0444, d_tracer,
			NULL, &tracing_saved_tgids_fops);

	trace_eval_init();

	trace_create_eval_file(d_tracer);

#ifdef CONFIG_MODULES
	register_module_notifier(&trace_module_nb);
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
			&ftrace_update_tot_cnt, &tracing_dyn_info_fops);
#endif

	create_trace_instances(d_tracer);

	update_tracer_options(&global_trace);

	return 0;
}
static int trace_panic_handler(struct notifier_block *this,
			       unsigned long event, void *unused)
{
	if (ftrace_dump_on_oops)
		ftrace_dump(ftrace_dump_on_oops);
	return NOTIFY_OK;
}

static struct notifier_block trace_panic_notifier = {
	.notifier_call  = trace_panic_handler,
	.next           = NULL,
	.priority       = 150   /* priority: INT_MAX >= x >= 0 */
};
static int trace_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	switch (val) {
	case DIE_OOPS:
		if (ftrace_dump_on_oops)
			ftrace_dump(ftrace_dump_on_oops);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block trace_die_notifier = {
	.notifier_call = trace_die_handler,
	.priority = 200
};
/*
 * printk is set to max of 1024, we really don't need it that big.
 * Nothing should be printing 1000 characters anyway.
 */
#define TRACE_MAX_PRINT		1000

/*
 * Define here KERN_TRACE so that we have one place to modify
 * it if we decide to change what log level the ftrace dump
 * should be at.
 */
#define KERN_TRACE		KERN_EMERG
void
trace_printk_seq(struct trace_seq *s)
{
	/* Probably should print a warning here. */
	if (s->seq.len >= TRACE_MAX_PRINT)
		s->seq.len = TRACE_MAX_PRINT;

	/*
	 * More paranoid code. Although the buffer size is set to
	 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
	 * an extra layer of protection.
	 */
	if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
		s->seq.len = s->seq.size - 1;

	/* should be zero ended, but we are paranoid. */
	s->buffer[s->seq.len] = 0;

	printk(KERN_TRACE "%s", s->buffer);

	trace_seq_init(s);
}
void trace_init_global_iter(struct trace_iterator *iter)
{
	iter->tr = &global_trace;
	iter->trace = iter->tr->current_trace;
	iter->cpu_file = RING_BUFFER_ALL_CPUS;
	iter->trace_buffer = &global_trace.trace_buffer;

	if (iter->trace && iter->trace->open)
		iter->trace->open(iter);

	/* Annotate start of buffers if we had overruns */
	if (ring_buffer_overruns(iter->trace_buffer->buffer))
		iter->iter_flags |= TRACE_FILE_ANNOTATE;

	/* Output in nanoseconds only if we are using a clock in nanoseconds. */
	if (trace_clocks[iter->tr->clock_id].in_ns)
		iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
}
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
	/* use static because iter can be a bit big for the stack */
	static struct trace_iterator iter;
	static atomic_t dump_running;
	struct trace_array *tr = &global_trace;
	unsigned int old_userobj;
	unsigned long flags;
	int cnt = 0, cpu;

	/* Only allow one dump user at a time. */
	if (atomic_inc_return(&dump_running) != 1) {
		atomic_dec(&dump_running);
		return;
	}

	/*
	 * Always turn off tracing when we dump.
	 * We don't need to show trace output of what happens
	 * between multiple crashes.
	 *
	 * If the user does a sysrq-z, then they can re-enable
	 * tracing with echo 1 > tracing_on.
	 */
	tracing_off();

	local_irq_save(flags);
	printk_nmi_direct_enter();

	/* Simulate the iterator */
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;

	/* don't look at user memory in panic mode */
	tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

	switch (oops_dump_mode) {
	case DUMP_ALL:
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
		break;
	case DUMP_ORIG:
		iter.cpu_file = raw_smp_processor_id();
		break;
	case DUMP_NONE:
		goto out_enable;
	default:
		printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
		iter.cpu_file = RING_BUFFER_ALL_CPUS;
	}

	printk(KERN_TRACE "Dumping ftrace buffer:\n");

	/* Did function tracer already get disabled? */
	if (ftrace_is_dead()) {
		printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
		printk("#          MAY BE MISSING FUNCTION EVENTS\n");
	}

	/*
	 * We need to stop all tracing on all CPUS to read
	 * the next buffer. This is a bit expensive, but is
	 * not done often. We fill all what we can read,
	 * and then release the locks again.
	 */

	while (!trace_empty(&iter)) {

		if (!cnt)
			printk(KERN_TRACE "---------------------------------\n");

		cnt++;

		/* reset all but tr, trace, and overruns */
		memset(&iter.seq, 0,
		       sizeof(struct trace_iterator) -
		       offsetof(struct trace_iterator, seq));
		iter.iter_flags |= TRACE_FILE_LAT_FMT;
		iter.pos = -1;

		if (trace_find_next_entry_inc(&iter) != NULL) {
			int ret;

			ret = print_trace_line(&iter);
			if (ret != TRACE_TYPE_NO_CONSUME)
				trace_consume(&iter);
		}
		touch_nmi_watchdog();

		trace_printk_seq(&iter.seq);
	}

	if (!cnt)
		printk(KERN_TRACE "   (ftrace buffer empty)\n");
	else
		printk(KERN_TRACE "---------------------------------\n");

 out_enable:
	tr->trace_flags |= old_userobj;

	for_each_tracing_cpu(cpu) {
		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}
	atomic_dec(&dump_running);
	printk_nmi_direct_exit();
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
int trace_run_command(const char *buf, int (*createfn)(int, char **))
{
	char **argv;
	int argc, ret;

	argc = 0;
	ret = 0;
	argv = argv_split(GFP_KERNEL, buf, &argc);
	if (!argv)
		return -ENOMEM;

	if (argc)
		ret = createfn(argc, argv);

	argv_free(argv);

	return ret;
}
#define WRITE_BUFSIZE  4096

ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
				size_t count, loff_t *ppos,
				int (*createfn)(int, char **))
{
	char *kbuf, *buf, *tmp;
	int ret = 0;
	size_t done = 0;
	size_t size;

	kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
	if (!kbuf)
		return -ENOMEM;

	while (done < count) {
		size = count - done;

		if (size >= WRITE_BUFSIZE)
			size = WRITE_BUFSIZE - 1;

		if (copy_from_user(kbuf, buffer + done, size)) {
			ret = -EFAULT;
			goto out;
		}
		kbuf[size] = '\0';
		buf = kbuf;
		do {
			tmp = strchr(buf, '\n');
			if (tmp) {
				*tmp = '\0';
				size = tmp - buf + 1;
			} else {
				size = strlen(buf);
				if (done + size < count) {
					if (buf != kbuf)
						break;
					/* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
					pr_warn("Line length is too long: Should be less than %d\n",
						WRITE_BUFSIZE - 2);
					ret = -EINVAL;
					goto out;
				}
			}
			done += size;

			/* Remove comments */
			tmp = strchr(buf, '#');

			if (tmp)
				*tmp = '\0';

			ret = trace_run_command(buf, createfn);
			if (ret)
				goto out;
			buf += size;

		} while (done < count);
	}
	ret = done;

out:
	kfree(kbuf);

	return ret;
}
__init static int tracer_alloc_buffers(void)
{
	int ring_buf_size;
	int ret = -ENOMEM;

	/*
	 * Make sure we don't accidentally add more trace options
	 * than we have bits for.
	 */
	BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);

	if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
		goto out;

	if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
		goto out_free_buffer_mask;

	/* Only allocate trace_printk buffers if a trace_printk exists */
	if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
		/* Must be called before global_trace.buffer is allocated */
		trace_printk_init_buffers();

	/* To save memory, keep the ring buffer size to its minimum */
	if (ring_buffer_expanded)
		ring_buf_size = trace_buf_size;
	else
		ring_buf_size = 1;

	cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
	cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);

	raw_spin_lock_init(&global_trace.start_lock);

	/*
	 * The prepare callbacks allocates some memory for the ring buffer. We
	 * don't free the buffer if the CPU goes down. If we were to free
	 * the buffer, then the user would lose any trace that was in the
	 * buffer. The memory will be removed once the "instance" is removed.
	 */
	ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
				      "trace/RB:preapre", trace_rb_cpu_prepare,
				      NULL);
	if (ret < 0)
		goto out_free_cpumask;
	/* Used for event triggers */
	ret = -ENOMEM;
	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
	if (!temp_buffer)
		goto out_rm_hp_state;

	if (trace_create_savedcmd() < 0)
		goto out_free_temp_buffer;

	/* TODO: make the number of buffers hot pluggable with CPUS */
	if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
		printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
		WARN_ON(1);
		goto out_free_savedcmd;
	}

	if (global_trace.buffer_disabled)
		tracing_off();

	if (trace_boot_clock) {
		ret = tracing_set_clock(&global_trace, trace_boot_clock);
		if (ret < 0)
			pr_warn("Trace clock %s not defined, going back to default\n",
				trace_boot_clock);
	}

	/*
	 * register_tracer() might reference current_trace, so it
	 * needs to be set before we register anything. This is
	 * just a bootstrap of current_trace anyway.
	 */
	global_trace.current_trace = &nop_trace;

	global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

	ftrace_init_global_array_ops(&global_trace);

	init_trace_flags_index(&global_trace);

	register_tracer(&nop_trace);

	/* Function tracing may start here (via kernel command line) */
	init_function_trace();

	/* All seems OK, enable tracing */
	tracing_disabled = 0;

	atomic_notifier_chain_register(&panic_notifier_list,
				       &trace_panic_notifier);

	register_die_notifier(&trace_die_notifier);

	global_trace.flags = TRACE_ARRAY_FL_GLOBAL;

	INIT_LIST_HEAD(&global_trace.systems);
	INIT_LIST_HEAD(&global_trace.events);
	INIT_LIST_HEAD(&global_trace.hist_vars);
	list_add(&global_trace.list, &ftrace_trace_arrays);

	apply_trace_boot_options();

	register_snapshot_cmd();

	return 0;

out_free_savedcmd:
	free_saved_cmdlines_buffer(savedcmd);
out_free_temp_buffer:
	ring_buffer_free(temp_buffer);
out_rm_hp_state:
	cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
out_free_cpumask:
	free_cpumask_var(global_trace.tracing_cpumask);
out_free_buffer_mask:
	free_cpumask_var(tracing_buffer_mask);
out:
	return ret;
}
void __init early_trace_init(void)
{
	if (tracepoint_printk) {
		tracepoint_print_iter =
			kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
		if (WARN_ON(!tracepoint_print_iter))
			tracepoint_printk = 0;
		else
			static_key_enable(&tracepoint_printk_key.key);
	}
	tracer_alloc_buffers();
}
void __init trace_init(void)
{
	trace_event_init();
}
__init static int clear_boot_tracer(void)
{
	/*
	 * The default tracer at boot buffer is an init section.
	 * This function is called in lateinit. If we did not
	 * find the boot tracer, then clear it out, to prevent
	 * later registration from accessing the buffer that is
	 * about to be freed.
	 */
	if (!default_bootup_tracer)
		return 0;

	printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
	       default_bootup_tracer);

	default_bootup_tracer = NULL;

	return 0;
}

fs_initcall(tracer_init_tracefs);
late_initcall_sync(clear_boot_tracer);
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
__init static int tracing_set_default_clock(void)
{
	/* sched_clock_stable() is determined in late_initcall */
	if (!trace_boot_clock && !sched_clock_stable()) {
		printk(KERN_WARNING
		       "Unstable clock detected, switching default tracing clock to \"global\"\n"
		       "If you want to keep using the local clock, then add:\n"
		       "  \"trace_clock=local\"\n"
		       "on the kernel command line\n");
		tracing_set_clock(&global_trace, "global");
	}

	return 0;
}
late_initcall_sync(tracing_set_default_clock);
#endif