]> git.ipfire.org Git - thirdparty/kernel/linux.git/blob - kernel/trace/trace.c
tracing: Add unified dynamic event framework
[thirdparty/kernel/linux.git] / kernel / trace / trace.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * ring buffer based function tracer
4 *
5 * Copyright (C) 2007-2012 Steven Rostedt <srostedt@redhat.com>
6 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
7 *
8 * Originally taken from the RT patch by:
9 * Arnaldo Carvalho de Melo <acme@redhat.com>
10 *
11 * Based on code from the latency_tracer, that is:
12 * Copyright (C) 2004-2006 Ingo Molnar
13 * Copyright (C) 2004 Nadia Yvette Chambers
14 */
15 #include <linux/ring_buffer.h>
16 #include <generated/utsrelease.h>
17 #include <linux/stacktrace.h>
18 #include <linux/writeback.h>
19 #include <linux/kallsyms.h>
20 #include <linux/seq_file.h>
21 #include <linux/notifier.h>
22 #include <linux/irqflags.h>
23 #include <linux/debugfs.h>
24 #include <linux/tracefs.h>
25 #include <linux/pagemap.h>
26 #include <linux/hardirq.h>
27 #include <linux/linkage.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/ftrace.h>
31 #include <linux/module.h>
32 #include <linux/percpu.h>
33 #include <linux/splice.h>
34 #include <linux/kdebug.h>
35 #include <linux/string.h>
36 #include <linux/mount.h>
37 #include <linux/rwsem.h>
38 #include <linux/slab.h>
39 #include <linux/ctype.h>
40 #include <linux/init.h>
41 #include <linux/poll.h>
42 #include <linux/nmi.h>
43 #include <linux/fs.h>
44 #include <linux/trace.h>
45 #include <linux/sched/clock.h>
46 #include <linux/sched/rt.h>
47
48 #include "trace.h"
49 #include "trace_output.h"
50
51 /*
52 * On boot up, the ring buffer is set to the minimum size, so that
53 * we do not waste memory on systems that are not using tracing.
54 */
55 bool ring_buffer_expanded;
56
57 /*
58 * We need to change this state when a selftest is running.
59 * A selftest will lurk into the ring-buffer to count the
60 * entries inserted during the selftest although some concurrent
61 * insertions into the ring-buffer such as trace_printk could occurred
62 * at the same time, giving false positive or negative results.
63 */
64 static bool __read_mostly tracing_selftest_running;
65
66 /*
67 * If a tracer is running, we do not want to run SELFTEST.
68 */
69 bool __read_mostly tracing_selftest_disabled;
70
71 /* Pipe tracepoints to printk */
72 struct trace_iterator *tracepoint_print_iter;
73 int tracepoint_printk;
74 static DEFINE_STATIC_KEY_FALSE(tracepoint_printk_key);
75
76 /* For tracers that don't implement custom flags */
77 static struct tracer_opt dummy_tracer_opt[] = {
78 { }
79 };
80
81 static int
82 dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
83 {
84 return 0;
85 }
86
87 /*
88 * To prevent the comm cache from being overwritten when no
89 * tracing is active, only save the comm when a trace event
90 * occurred.
91 */
92 static DEFINE_PER_CPU(bool, trace_taskinfo_save);
93
94 /*
95 * Kill all tracing for good (never come back).
96 * It is initialized to 1 but will turn to zero if the initialization
97 * of the tracer is successful. But that is the only place that sets
98 * this back to zero.
99 */
100 static int tracing_disabled = 1;
101
102 cpumask_var_t __read_mostly tracing_buffer_mask;
103
104 /*
105 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
106 *
107 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
108 * is set, then ftrace_dump is called. This will output the contents
109 * of the ftrace buffers to the console. This is very useful for
110 * capturing traces that lead to crashes and outputing it to a
111 * serial console.
112 *
113 * It is default off, but you can enable it with either specifying
114 * "ftrace_dump_on_oops" in the kernel command line, or setting
115 * /proc/sys/kernel/ftrace_dump_on_oops
116 * Set 1 if you want to dump buffers of all CPUs
117 * Set 2 if you want to dump the buffer of the CPU that triggered oops
118 */
119
120 enum ftrace_dump_mode ftrace_dump_on_oops;
121
122 /* When set, tracing will stop when a WARN*() is hit */
123 int __disable_trace_on_warning;
124
125 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
126 /* Map of enums to their values, for "eval_map" file */
127 struct trace_eval_map_head {
128 struct module *mod;
129 unsigned long length;
130 };
131
132 union trace_eval_map_item;
133
134 struct trace_eval_map_tail {
135 /*
136 * "end" is first and points to NULL as it must be different
137 * than "mod" or "eval_string"
138 */
139 union trace_eval_map_item *next;
140 const char *end; /* points to NULL */
141 };
142
143 static DEFINE_MUTEX(trace_eval_mutex);
144
145 /*
146 * The trace_eval_maps are saved in an array with two extra elements,
147 * one at the beginning, and one at the end. The beginning item contains
148 * the count of the saved maps (head.length), and the module they
149 * belong to if not built in (head.mod). The ending item contains a
150 * pointer to the next array of saved eval_map items.
151 */
152 union trace_eval_map_item {
153 struct trace_eval_map map;
154 struct trace_eval_map_head head;
155 struct trace_eval_map_tail tail;
156 };
157
158 static union trace_eval_map_item *trace_eval_maps;
159 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
160
161 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
162
163 #define MAX_TRACER_SIZE 100
164 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
165 static char *default_bootup_tracer;
166
167 static bool allocate_snapshot;
168
169 static int __init set_cmdline_ftrace(char *str)
170 {
171 strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
172 default_bootup_tracer = bootup_tracer_buf;
173 /* We are using ftrace early, expand it */
174 ring_buffer_expanded = true;
175 return 1;
176 }
177 __setup("ftrace=", set_cmdline_ftrace);
178
179 static int __init set_ftrace_dump_on_oops(char *str)
180 {
181 if (*str++ != '=' || !*str) {
182 ftrace_dump_on_oops = DUMP_ALL;
183 return 1;
184 }
185
186 if (!strcmp("orig_cpu", str)) {
187 ftrace_dump_on_oops = DUMP_ORIG;
188 return 1;
189 }
190
191 return 0;
192 }
193 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
194
195 static int __init stop_trace_on_warning(char *str)
196 {
197 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
198 __disable_trace_on_warning = 1;
199 return 1;
200 }
201 __setup("traceoff_on_warning", stop_trace_on_warning);
202
203 static int __init boot_alloc_snapshot(char *str)
204 {
205 allocate_snapshot = true;
206 /* We also need the main ring buffer expanded */
207 ring_buffer_expanded = true;
208 return 1;
209 }
210 __setup("alloc_snapshot", boot_alloc_snapshot);
211
212
213 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
214
215 static int __init set_trace_boot_options(char *str)
216 {
217 strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
218 return 0;
219 }
220 __setup("trace_options=", set_trace_boot_options);
221
222 static char trace_boot_clock_buf[MAX_TRACER_SIZE] __initdata;
223 static char *trace_boot_clock __initdata;
224
225 static int __init set_trace_boot_clock(char *str)
226 {
227 strlcpy(trace_boot_clock_buf, str, MAX_TRACER_SIZE);
228 trace_boot_clock = trace_boot_clock_buf;
229 return 0;
230 }
231 __setup("trace_clock=", set_trace_boot_clock);
232
233 static int __init set_tracepoint_printk(char *str)
234 {
235 if ((strcmp(str, "=0") != 0 && strcmp(str, "=off") != 0))
236 tracepoint_printk = 1;
237 return 1;
238 }
239 __setup("tp_printk", set_tracepoint_printk);
240
241 unsigned long long ns2usecs(u64 nsec)
242 {
243 nsec += 500;
244 do_div(nsec, 1000);
245 return nsec;
246 }
247
248 /* trace_flags holds trace_options default values */
249 #define TRACE_DEFAULT_FLAGS \
250 (FUNCTION_DEFAULT_FLAGS | \
251 TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | \
252 TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | \
253 TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE | \
254 TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS)
255
256 /* trace_options that are only supported by global_trace */
257 #define TOP_LEVEL_TRACE_FLAGS (TRACE_ITER_PRINTK | \
258 TRACE_ITER_PRINTK_MSGONLY | TRACE_ITER_RECORD_CMD)
259
260 /* trace_flags that are default zero for instances */
261 #define ZEROED_TRACE_FLAGS \
262 (TRACE_ITER_EVENT_FORK | TRACE_ITER_FUNC_FORK)
263
264 /*
265 * The global_trace is the descriptor that holds the top-level tracing
266 * buffers for the live tracing.
267 */
268 static struct trace_array global_trace = {
269 .trace_flags = TRACE_DEFAULT_FLAGS,
270 };
271
272 LIST_HEAD(ftrace_trace_arrays);
273
274 int trace_array_get(struct trace_array *this_tr)
275 {
276 struct trace_array *tr;
277 int ret = -ENODEV;
278
279 mutex_lock(&trace_types_lock);
280 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
281 if (tr == this_tr) {
282 tr->ref++;
283 ret = 0;
284 break;
285 }
286 }
287 mutex_unlock(&trace_types_lock);
288
289 return ret;
290 }
291
292 static void __trace_array_put(struct trace_array *this_tr)
293 {
294 WARN_ON(!this_tr->ref);
295 this_tr->ref--;
296 }
297
298 void trace_array_put(struct trace_array *this_tr)
299 {
300 mutex_lock(&trace_types_lock);
301 __trace_array_put(this_tr);
302 mutex_unlock(&trace_types_lock);
303 }
304
305 int call_filter_check_discard(struct trace_event_call *call, void *rec,
306 struct ring_buffer *buffer,
307 struct ring_buffer_event *event)
308 {
309 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
310 !filter_match_preds(call->filter, rec)) {
311 __trace_event_discard_commit(buffer, event);
312 return 1;
313 }
314
315 return 0;
316 }
317
318 void trace_free_pid_list(struct trace_pid_list *pid_list)
319 {
320 vfree(pid_list->pids);
321 kfree(pid_list);
322 }
323
324 /**
325 * trace_find_filtered_pid - check if a pid exists in a filtered_pid list
326 * @filtered_pids: The list of pids to check
327 * @search_pid: The PID to find in @filtered_pids
328 *
329 * Returns true if @search_pid is fonud in @filtered_pids, and false otherwis.
330 */
331 bool
332 trace_find_filtered_pid(struct trace_pid_list *filtered_pids, pid_t search_pid)
333 {
334 /*
335 * If pid_max changed after filtered_pids was created, we
336 * by default ignore all pids greater than the previous pid_max.
337 */
338 if (search_pid >= filtered_pids->pid_max)
339 return false;
340
341 return test_bit(search_pid, filtered_pids->pids);
342 }
343
344 /**
345 * trace_ignore_this_task - should a task be ignored for tracing
346 * @filtered_pids: The list of pids to check
347 * @task: The task that should be ignored if not filtered
348 *
349 * Checks if @task should be traced or not from @filtered_pids.
350 * Returns true if @task should *NOT* be traced.
351 * Returns false if @task should be traced.
352 */
353 bool
354 trace_ignore_this_task(struct trace_pid_list *filtered_pids, struct task_struct *task)
355 {
356 /*
357 * Return false, because if filtered_pids does not exist,
358 * all pids are good to trace.
359 */
360 if (!filtered_pids)
361 return false;
362
363 return !trace_find_filtered_pid(filtered_pids, task->pid);
364 }
365
366 /**
367 * trace_pid_filter_add_remove_task - Add or remove a task from a pid_list
368 * @pid_list: The list to modify
369 * @self: The current task for fork or NULL for exit
370 * @task: The task to add or remove
371 *
372 * If adding a task, if @self is defined, the task is only added if @self
373 * is also included in @pid_list. This happens on fork and tasks should
374 * only be added when the parent is listed. If @self is NULL, then the
375 * @task pid will be removed from the list, which would happen on exit
376 * of a task.
377 */
378 void trace_filter_add_remove_task(struct trace_pid_list *pid_list,
379 struct task_struct *self,
380 struct task_struct *task)
381 {
382 if (!pid_list)
383 return;
384
385 /* For forks, we only add if the forking task is listed */
386 if (self) {
387 if (!trace_find_filtered_pid(pid_list, self->pid))
388 return;
389 }
390
391 /* Sorry, but we don't support pid_max changing after setting */
392 if (task->pid >= pid_list->pid_max)
393 return;
394
395 /* "self" is set for forks, and NULL for exits */
396 if (self)
397 set_bit(task->pid, pid_list->pids);
398 else
399 clear_bit(task->pid, pid_list->pids);
400 }
401
402 /**
403 * trace_pid_next - Used for seq_file to get to the next pid of a pid_list
404 * @pid_list: The pid list to show
405 * @v: The last pid that was shown (+1 the actual pid to let zero be displayed)
406 * @pos: The position of the file
407 *
408 * This is used by the seq_file "next" operation to iterate the pids
409 * listed in a trace_pid_list structure.
410 *
411 * Returns the pid+1 as we want to display pid of zero, but NULL would
412 * stop the iteration.
413 */
414 void *trace_pid_next(struct trace_pid_list *pid_list, void *v, loff_t *pos)
415 {
416 unsigned long pid = (unsigned long)v;
417
418 (*pos)++;
419
420 /* pid already is +1 of the actual prevous bit */
421 pid = find_next_bit(pid_list->pids, pid_list->pid_max, pid);
422
423 /* Return pid + 1 to allow zero to be represented */
424 if (pid < pid_list->pid_max)
425 return (void *)(pid + 1);
426
427 return NULL;
428 }
429
430 /**
431 * trace_pid_start - Used for seq_file to start reading pid lists
432 * @pid_list: The pid list to show
433 * @pos: The position of the file
434 *
435 * This is used by seq_file "start" operation to start the iteration
436 * of listing pids.
437 *
438 * Returns the pid+1 as we want to display pid of zero, but NULL would
439 * stop the iteration.
440 */
441 void *trace_pid_start(struct trace_pid_list *pid_list, loff_t *pos)
442 {
443 unsigned long pid;
444 loff_t l = 0;
445
446 pid = find_first_bit(pid_list->pids, pid_list->pid_max);
447 if (pid >= pid_list->pid_max)
448 return NULL;
449
450 /* Return pid + 1 so that zero can be the exit value */
451 for (pid++; pid && l < *pos;
452 pid = (unsigned long)trace_pid_next(pid_list, (void *)pid, &l))
453 ;
454 return (void *)pid;
455 }
456
457 /**
458 * trace_pid_show - show the current pid in seq_file processing
459 * @m: The seq_file structure to write into
460 * @v: A void pointer of the pid (+1) value to display
461 *
462 * Can be directly used by seq_file operations to display the current
463 * pid value.
464 */
465 int trace_pid_show(struct seq_file *m, void *v)
466 {
467 unsigned long pid = (unsigned long)v - 1;
468
469 seq_printf(m, "%lu\n", pid);
470 return 0;
471 }
472
473 /* 128 should be much more than enough */
474 #define PID_BUF_SIZE 127
475
476 int trace_pid_write(struct trace_pid_list *filtered_pids,
477 struct trace_pid_list **new_pid_list,
478 const char __user *ubuf, size_t cnt)
479 {
480 struct trace_pid_list *pid_list;
481 struct trace_parser parser;
482 unsigned long val;
483 int nr_pids = 0;
484 ssize_t read = 0;
485 ssize_t ret = 0;
486 loff_t pos;
487 pid_t pid;
488
489 if (trace_parser_get_init(&parser, PID_BUF_SIZE + 1))
490 return -ENOMEM;
491
492 /*
493 * Always recreate a new array. The write is an all or nothing
494 * operation. Always create a new array when adding new pids by
495 * the user. If the operation fails, then the current list is
496 * not modified.
497 */
498 pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
499 if (!pid_list)
500 return -ENOMEM;
501
502 pid_list->pid_max = READ_ONCE(pid_max);
503
504 /* Only truncating will shrink pid_max */
505 if (filtered_pids && filtered_pids->pid_max > pid_list->pid_max)
506 pid_list->pid_max = filtered_pids->pid_max;
507
508 pid_list->pids = vzalloc((pid_list->pid_max + 7) >> 3);
509 if (!pid_list->pids) {
510 kfree(pid_list);
511 return -ENOMEM;
512 }
513
514 if (filtered_pids) {
515 /* copy the current bits to the new max */
516 for_each_set_bit(pid, filtered_pids->pids,
517 filtered_pids->pid_max) {
518 set_bit(pid, pid_list->pids);
519 nr_pids++;
520 }
521 }
522
523 while (cnt > 0) {
524
525 pos = 0;
526
527 ret = trace_get_user(&parser, ubuf, cnt, &pos);
528 if (ret < 0 || !trace_parser_loaded(&parser))
529 break;
530
531 read += ret;
532 ubuf += ret;
533 cnt -= ret;
534
535 ret = -EINVAL;
536 if (kstrtoul(parser.buffer, 0, &val))
537 break;
538 if (val >= pid_list->pid_max)
539 break;
540
541 pid = (pid_t)val;
542
543 set_bit(pid, pid_list->pids);
544 nr_pids++;
545
546 trace_parser_clear(&parser);
547 ret = 0;
548 }
549 trace_parser_put(&parser);
550
551 if (ret < 0) {
552 trace_free_pid_list(pid_list);
553 return ret;
554 }
555
556 if (!nr_pids) {
557 /* Cleared the list of pids */
558 trace_free_pid_list(pid_list);
559 read = ret;
560 pid_list = NULL;
561 }
562
563 *new_pid_list = pid_list;
564
565 return read;
566 }
567
568 static u64 buffer_ftrace_now(struct trace_buffer *buf, int cpu)
569 {
570 u64 ts;
571
572 /* Early boot up does not have a buffer yet */
573 if (!buf->buffer)
574 return trace_clock_local();
575
576 ts = ring_buffer_time_stamp(buf->buffer, cpu);
577 ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
578
579 return ts;
580 }
581
582 u64 ftrace_now(int cpu)
583 {
584 return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
585 }
586
587 /**
588 * tracing_is_enabled - Show if global_trace has been disabled
589 *
590 * Shows if the global trace has been enabled or not. It uses the
591 * mirror flag "buffer_disabled" to be used in fast paths such as for
592 * the irqsoff tracer. But it may be inaccurate due to races. If you
593 * need to know the accurate state, use tracing_is_on() which is a little
594 * slower, but accurate.
595 */
596 int tracing_is_enabled(void)
597 {
598 /*
599 * For quick access (irqsoff uses this in fast path), just
600 * return the mirror variable of the state of the ring buffer.
601 * It's a little racy, but we don't really care.
602 */
603 smp_rmb();
604 return !global_trace.buffer_disabled;
605 }
606
607 /*
608 * trace_buf_size is the size in bytes that is allocated
609 * for a buffer. Note, the number of bytes is always rounded
610 * to page size.
611 *
612 * This number is purposely set to a low number of 16384.
613 * If the dump on oops happens, it will be much appreciated
614 * to not have to wait for all that output. Anyway this can be
615 * boot time and run time configurable.
616 */
617 #define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
618
619 static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
620
621 /* trace_types holds a link list of available tracers. */
622 static struct tracer *trace_types __read_mostly;
623
624 /*
625 * trace_types_lock is used to protect the trace_types list.
626 */
627 DEFINE_MUTEX(trace_types_lock);
628
629 /*
630 * serialize the access of the ring buffer
631 *
632 * ring buffer serializes readers, but it is low level protection.
633 * The validity of the events (which returns by ring_buffer_peek() ..etc)
634 * are not protected by ring buffer.
635 *
636 * The content of events may become garbage if we allow other process consumes
637 * these events concurrently:
638 * A) the page of the consumed events may become a normal page
639 * (not reader page) in ring buffer, and this page will be rewrited
640 * by events producer.
641 * B) The page of the consumed events may become a page for splice_read,
642 * and this page will be returned to system.
643 *
644 * These primitives allow multi process access to different cpu ring buffer
645 * concurrently.
646 *
647 * These primitives don't distinguish read-only and read-consume access.
648 * Multi read-only access are also serialized.
649 */
650
651 #ifdef CONFIG_SMP
652 static DECLARE_RWSEM(all_cpu_access_lock);
653 static DEFINE_PER_CPU(struct mutex, cpu_access_lock);
654
655 static inline void trace_access_lock(int cpu)
656 {
657 if (cpu == RING_BUFFER_ALL_CPUS) {
658 /* gain it for accessing the whole ring buffer. */
659 down_write(&all_cpu_access_lock);
660 } else {
661 /* gain it for accessing a cpu ring buffer. */
662
663 /* Firstly block other trace_access_lock(RING_BUFFER_ALL_CPUS). */
664 down_read(&all_cpu_access_lock);
665
666 /* Secondly block other access to this @cpu ring buffer. */
667 mutex_lock(&per_cpu(cpu_access_lock, cpu));
668 }
669 }
670
671 static inline void trace_access_unlock(int cpu)
672 {
673 if (cpu == RING_BUFFER_ALL_CPUS) {
674 up_write(&all_cpu_access_lock);
675 } else {
676 mutex_unlock(&per_cpu(cpu_access_lock, cpu));
677 up_read(&all_cpu_access_lock);
678 }
679 }
680
681 static inline void trace_access_lock_init(void)
682 {
683 int cpu;
684
685 for_each_possible_cpu(cpu)
686 mutex_init(&per_cpu(cpu_access_lock, cpu));
687 }
688
689 #else
690
691 static DEFINE_MUTEX(access_lock);
692
693 static inline void trace_access_lock(int cpu)
694 {
695 (void)cpu;
696 mutex_lock(&access_lock);
697 }
698
699 static inline void trace_access_unlock(int cpu)
700 {
701 (void)cpu;
702 mutex_unlock(&access_lock);
703 }
704
705 static inline void trace_access_lock_init(void)
706 {
707 }
708
709 #endif
710
711 #ifdef CONFIG_STACKTRACE
712 static void __ftrace_trace_stack(struct ring_buffer *buffer,
713 unsigned long flags,
714 int skip, int pc, struct pt_regs *regs);
715 static inline void ftrace_trace_stack(struct trace_array *tr,
716 struct ring_buffer *buffer,
717 unsigned long flags,
718 int skip, int pc, struct pt_regs *regs);
719
720 #else
721 static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
722 unsigned long flags,
723 int skip, int pc, struct pt_regs *regs)
724 {
725 }
726 static inline void ftrace_trace_stack(struct trace_array *tr,
727 struct ring_buffer *buffer,
728 unsigned long flags,
729 int skip, int pc, struct pt_regs *regs)
730 {
731 }
732
733 #endif
734
735 static __always_inline void
736 trace_event_setup(struct ring_buffer_event *event,
737 int type, unsigned long flags, int pc)
738 {
739 struct trace_entry *ent = ring_buffer_event_data(event);
740
741 tracing_generic_entry_update(ent, flags, pc);
742 ent->type = type;
743 }
744
745 static __always_inline struct ring_buffer_event *
746 __trace_buffer_lock_reserve(struct ring_buffer *buffer,
747 int type,
748 unsigned long len,
749 unsigned long flags, int pc)
750 {
751 struct ring_buffer_event *event;
752
753 event = ring_buffer_lock_reserve(buffer, len);
754 if (event != NULL)
755 trace_event_setup(event, type, flags, pc);
756
757 return event;
758 }
759
760 void tracer_tracing_on(struct trace_array *tr)
761 {
762 if (tr->trace_buffer.buffer)
763 ring_buffer_record_on(tr->trace_buffer.buffer);
764 /*
765 * This flag is looked at when buffers haven't been allocated
766 * yet, or by some tracers (like irqsoff), that just want to
767 * know if the ring buffer has been disabled, but it can handle
768 * races of where it gets disabled but we still do a record.
769 * As the check is in the fast path of the tracers, it is more
770 * important to be fast than accurate.
771 */
772 tr->buffer_disabled = 0;
773 /* Make the flag seen by readers */
774 smp_wmb();
775 }
776
777 /**
778 * tracing_on - enable tracing buffers
779 *
780 * This function enables tracing buffers that may have been
781 * disabled with tracing_off.
782 */
783 void tracing_on(void)
784 {
785 tracer_tracing_on(&global_trace);
786 }
787 EXPORT_SYMBOL_GPL(tracing_on);
788
789
790 static __always_inline void
791 __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
792 {
793 __this_cpu_write(trace_taskinfo_save, true);
794
795 /* If this is the temp buffer, we need to commit fully */
796 if (this_cpu_read(trace_buffered_event) == event) {
797 /* Length is in event->array[0] */
798 ring_buffer_write(buffer, event->array[0], &event->array[1]);
799 /* Release the temp buffer */
800 this_cpu_dec(trace_buffered_event_cnt);
801 } else
802 ring_buffer_unlock_commit(buffer, event);
803 }
804
805 /**
806 * __trace_puts - write a constant string into the trace buffer.
807 * @ip: The address of the caller
808 * @str: The constant string to write
809 * @size: The size of the string.
810 */
811 int __trace_puts(unsigned long ip, const char *str, int size)
812 {
813 struct ring_buffer_event *event;
814 struct ring_buffer *buffer;
815 struct print_entry *entry;
816 unsigned long irq_flags;
817 int alloc;
818 int pc;
819
820 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
821 return 0;
822
823 pc = preempt_count();
824
825 if (unlikely(tracing_selftest_running || tracing_disabled))
826 return 0;
827
828 alloc = sizeof(*entry) + size + 2; /* possible \n added */
829
830 local_save_flags(irq_flags);
831 buffer = global_trace.trace_buffer.buffer;
832 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
833 irq_flags, pc);
834 if (!event)
835 return 0;
836
837 entry = ring_buffer_event_data(event);
838 entry->ip = ip;
839
840 memcpy(&entry->buf, str, size);
841
842 /* Add a newline if necessary */
843 if (entry->buf[size - 1] != '\n') {
844 entry->buf[size] = '\n';
845 entry->buf[size + 1] = '\0';
846 } else
847 entry->buf[size] = '\0';
848
849 __buffer_unlock_commit(buffer, event);
850 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
851
852 return size;
853 }
854 EXPORT_SYMBOL_GPL(__trace_puts);
855
856 /**
857 * __trace_bputs - write the pointer to a constant string into trace buffer
858 * @ip: The address of the caller
859 * @str: The constant string to write to the buffer to
860 */
861 int __trace_bputs(unsigned long ip, const char *str)
862 {
863 struct ring_buffer_event *event;
864 struct ring_buffer *buffer;
865 struct bputs_entry *entry;
866 unsigned long irq_flags;
867 int size = sizeof(struct bputs_entry);
868 int pc;
869
870 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
871 return 0;
872
873 pc = preempt_count();
874
875 if (unlikely(tracing_selftest_running || tracing_disabled))
876 return 0;
877
878 local_save_flags(irq_flags);
879 buffer = global_trace.trace_buffer.buffer;
880 event = __trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
881 irq_flags, pc);
882 if (!event)
883 return 0;
884
885 entry = ring_buffer_event_data(event);
886 entry->ip = ip;
887 entry->str = str;
888
889 __buffer_unlock_commit(buffer, event);
890 ftrace_trace_stack(&global_trace, buffer, irq_flags, 4, pc, NULL);
891
892 return 1;
893 }
894 EXPORT_SYMBOL_GPL(__trace_bputs);
895
896 #ifdef CONFIG_TRACER_SNAPSHOT
897 void tracing_snapshot_instance(struct trace_array *tr)
898 {
899 struct tracer *tracer = tr->current_trace;
900 unsigned long flags;
901
902 if (in_nmi()) {
903 internal_trace_puts("*** SNAPSHOT CALLED FROM NMI CONTEXT ***\n");
904 internal_trace_puts("*** snapshot is being ignored ***\n");
905 return;
906 }
907
908 if (!tr->allocated_snapshot) {
909 internal_trace_puts("*** SNAPSHOT NOT ALLOCATED ***\n");
910 internal_trace_puts("*** stopping trace here! ***\n");
911 tracing_off();
912 return;
913 }
914
915 /* Note, snapshot can not be used when the tracer uses it */
916 if (tracer->use_max_tr) {
917 internal_trace_puts("*** LATENCY TRACER ACTIVE ***\n");
918 internal_trace_puts("*** Can not use snapshot (sorry) ***\n");
919 return;
920 }
921
922 local_irq_save(flags);
923 update_max_tr(tr, current, smp_processor_id());
924 local_irq_restore(flags);
925 }
926
927 /**
928 * tracing_snapshot - take a snapshot of the current buffer.
929 *
930 * This causes a swap between the snapshot buffer and the current live
931 * tracing buffer. You can use this to take snapshots of the live
932 * trace when some condition is triggered, but continue to trace.
933 *
934 * Note, make sure to allocate the snapshot with either
935 * a tracing_snapshot_alloc(), or by doing it manually
936 * with: echo 1 > /sys/kernel/debug/tracing/snapshot
937 *
938 * If the snapshot buffer is not allocated, it will stop tracing.
939 * Basically making a permanent snapshot.
940 */
941 void tracing_snapshot(void)
942 {
943 struct trace_array *tr = &global_trace;
944
945 tracing_snapshot_instance(tr);
946 }
947 EXPORT_SYMBOL_GPL(tracing_snapshot);
948
949 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
950 struct trace_buffer *size_buf, int cpu_id);
951 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
952
953 int tracing_alloc_snapshot_instance(struct trace_array *tr)
954 {
955 int ret;
956
957 if (!tr->allocated_snapshot) {
958
959 /* allocate spare buffer */
960 ret = resize_buffer_duplicate_size(&tr->max_buffer,
961 &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
962 if (ret < 0)
963 return ret;
964
965 tr->allocated_snapshot = true;
966 }
967
968 return 0;
969 }
970
971 static void free_snapshot(struct trace_array *tr)
972 {
973 /*
974 * We don't free the ring buffer. instead, resize it because
975 * The max_tr ring buffer has some state (e.g. ring->clock) and
976 * we want preserve it.
977 */
978 ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
979 set_buffer_entries(&tr->max_buffer, 1);
980 tracing_reset_online_cpus(&tr->max_buffer);
981 tr->allocated_snapshot = false;
982 }
983
984 /**
985 * tracing_alloc_snapshot - allocate snapshot buffer.
986 *
987 * This only allocates the snapshot buffer if it isn't already
988 * allocated - it doesn't also take a snapshot.
989 *
990 * This is meant to be used in cases where the snapshot buffer needs
991 * to be set up for events that can't sleep but need to be able to
992 * trigger a snapshot.
993 */
994 int tracing_alloc_snapshot(void)
995 {
996 struct trace_array *tr = &global_trace;
997 int ret;
998
999 ret = tracing_alloc_snapshot_instance(tr);
1000 WARN_ON(ret < 0);
1001
1002 return ret;
1003 }
1004 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1005
1006 /**
1007 * tracing_snapshot_alloc - allocate and take a snapshot of the current buffer.
1008 *
1009 * This is similar to tracing_snapshot(), but it will allocate the
1010 * snapshot buffer if it isn't already allocated. Use this only
1011 * where it is safe to sleep, as the allocation may sleep.
1012 *
1013 * This causes a swap between the snapshot buffer and the current live
1014 * tracing buffer. You can use this to take snapshots of the live
1015 * trace when some condition is triggered, but continue to trace.
1016 */
1017 void tracing_snapshot_alloc(void)
1018 {
1019 int ret;
1020
1021 ret = tracing_alloc_snapshot();
1022 if (ret < 0)
1023 return;
1024
1025 tracing_snapshot();
1026 }
1027 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1028 #else
1029 void tracing_snapshot(void)
1030 {
1031 WARN_ONCE(1, "Snapshot feature not enabled, but internal snapshot used");
1032 }
1033 EXPORT_SYMBOL_GPL(tracing_snapshot);
1034 int tracing_alloc_snapshot(void)
1035 {
1036 WARN_ONCE(1, "Snapshot feature not enabled, but snapshot allocation used");
1037 return -ENODEV;
1038 }
1039 EXPORT_SYMBOL_GPL(tracing_alloc_snapshot);
1040 void tracing_snapshot_alloc(void)
1041 {
1042 /* Give warning */
1043 tracing_snapshot();
1044 }
1045 EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
1046 #endif /* CONFIG_TRACER_SNAPSHOT */
1047
1048 void tracer_tracing_off(struct trace_array *tr)
1049 {
1050 if (tr->trace_buffer.buffer)
1051 ring_buffer_record_off(tr->trace_buffer.buffer);
1052 /*
1053 * This flag is looked at when buffers haven't been allocated
1054 * yet, or by some tracers (like irqsoff), that just want to
1055 * know if the ring buffer has been disabled, but it can handle
1056 * races of where it gets disabled but we still do a record.
1057 * As the check is in the fast path of the tracers, it is more
1058 * important to be fast than accurate.
1059 */
1060 tr->buffer_disabled = 1;
1061 /* Make the flag seen by readers */
1062 smp_wmb();
1063 }
1064
1065 /**
1066 * tracing_off - turn off tracing buffers
1067 *
1068 * This function stops the tracing buffers from recording data.
1069 * It does not disable any overhead the tracers themselves may
1070 * be causing. This function simply causes all recording to
1071 * the ring buffers to fail.
1072 */
1073 void tracing_off(void)
1074 {
1075 tracer_tracing_off(&global_trace);
1076 }
1077 EXPORT_SYMBOL_GPL(tracing_off);
1078
1079 void disable_trace_on_warning(void)
1080 {
1081 if (__disable_trace_on_warning)
1082 tracing_off();
1083 }
1084
1085 /**
1086 * tracer_tracing_is_on - show real state of ring buffer enabled
1087 * @tr : the trace array to know if ring buffer is enabled
1088 *
1089 * Shows real state of the ring buffer if it is enabled or not.
1090 */
1091 bool tracer_tracing_is_on(struct trace_array *tr)
1092 {
1093 if (tr->trace_buffer.buffer)
1094 return ring_buffer_record_is_on(tr->trace_buffer.buffer);
1095 return !tr->buffer_disabled;
1096 }
1097
1098 /**
1099 * tracing_is_on - show state of ring buffers enabled
1100 */
1101 int tracing_is_on(void)
1102 {
1103 return tracer_tracing_is_on(&global_trace);
1104 }
1105 EXPORT_SYMBOL_GPL(tracing_is_on);
1106
1107 static int __init set_buf_size(char *str)
1108 {
1109 unsigned long buf_size;
1110
1111 if (!str)
1112 return 0;
1113 buf_size = memparse(str, &str);
1114 /* nr_entries can not be zero */
1115 if (buf_size == 0)
1116 return 0;
1117 trace_buf_size = buf_size;
1118 return 1;
1119 }
1120 __setup("trace_buf_size=", set_buf_size);
1121
1122 static int __init set_tracing_thresh(char *str)
1123 {
1124 unsigned long threshold;
1125 int ret;
1126
1127 if (!str)
1128 return 0;
1129 ret = kstrtoul(str, 0, &threshold);
1130 if (ret < 0)
1131 return 0;
1132 tracing_thresh = threshold * 1000;
1133 return 1;
1134 }
1135 __setup("tracing_thresh=", set_tracing_thresh);
1136
1137 unsigned long nsecs_to_usecs(unsigned long nsecs)
1138 {
1139 return nsecs / 1000;
1140 }
1141
1142 /*
1143 * TRACE_FLAGS is defined as a tuple matching bit masks with strings.
1144 * It uses C(a, b) where 'a' is the eval (enum) name and 'b' is the string that
1145 * matches it. By defining "C(a, b) b", TRACE_FLAGS becomes a list
1146 * of strings in the order that the evals (enum) were defined.
1147 */
1148 #undef C
1149 #define C(a, b) b
1150
1151 /* These must match the bit postions in trace_iterator_flags */
1152 static const char *trace_options[] = {
1153 TRACE_FLAGS
1154 NULL
1155 };
1156
1157 static struct {
1158 u64 (*func)(void);
1159 const char *name;
1160 int in_ns; /* is this clock in nanoseconds? */
1161 } trace_clocks[] = {
1162 { trace_clock_local, "local", 1 },
1163 { trace_clock_global, "global", 1 },
1164 { trace_clock_counter, "counter", 0 },
1165 { trace_clock_jiffies, "uptime", 0 },
1166 { trace_clock, "perf", 1 },
1167 { ktime_get_mono_fast_ns, "mono", 1 },
1168 { ktime_get_raw_fast_ns, "mono_raw", 1 },
1169 { ktime_get_boot_fast_ns, "boot", 1 },
1170 ARCH_TRACE_CLOCKS
1171 };
1172
1173 bool trace_clock_in_ns(struct trace_array *tr)
1174 {
1175 if (trace_clocks[tr->clock_id].in_ns)
1176 return true;
1177
1178 return false;
1179 }
1180
1181 /*
1182 * trace_parser_get_init - gets the buffer for trace parser
1183 */
1184 int trace_parser_get_init(struct trace_parser *parser, int size)
1185 {
1186 memset(parser, 0, sizeof(*parser));
1187
1188 parser->buffer = kmalloc(size, GFP_KERNEL);
1189 if (!parser->buffer)
1190 return 1;
1191
1192 parser->size = size;
1193 return 0;
1194 }
1195
1196 /*
1197 * trace_parser_put - frees the buffer for trace parser
1198 */
1199 void trace_parser_put(struct trace_parser *parser)
1200 {
1201 kfree(parser->buffer);
1202 parser->buffer = NULL;
1203 }
1204
1205 /*
1206 * trace_get_user - reads the user input string separated by space
1207 * (matched by isspace(ch))
1208 *
1209 * For each string found the 'struct trace_parser' is updated,
1210 * and the function returns.
1211 *
1212 * Returns number of bytes read.
1213 *
1214 * See kernel/trace/trace.h for 'struct trace_parser' details.
1215 */
1216 int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
1217 size_t cnt, loff_t *ppos)
1218 {
1219 char ch;
1220 size_t read = 0;
1221 ssize_t ret;
1222
1223 if (!*ppos)
1224 trace_parser_clear(parser);
1225
1226 ret = get_user(ch, ubuf++);
1227 if (ret)
1228 goto out;
1229
1230 read++;
1231 cnt--;
1232
1233 /*
1234 * The parser is not finished with the last write,
1235 * continue reading the user input without skipping spaces.
1236 */
1237 if (!parser->cont) {
1238 /* skip white space */
1239 while (cnt && isspace(ch)) {
1240 ret = get_user(ch, ubuf++);
1241 if (ret)
1242 goto out;
1243 read++;
1244 cnt--;
1245 }
1246
1247 parser->idx = 0;
1248
1249 /* only spaces were written */
1250 if (isspace(ch) || !ch) {
1251 *ppos += read;
1252 ret = read;
1253 goto out;
1254 }
1255 }
1256
1257 /* read the non-space input */
1258 while (cnt && !isspace(ch) && ch) {
1259 if (parser->idx < parser->size - 1)
1260 parser->buffer[parser->idx++] = ch;
1261 else {
1262 ret = -EINVAL;
1263 goto out;
1264 }
1265 ret = get_user(ch, ubuf++);
1266 if (ret)
1267 goto out;
1268 read++;
1269 cnt--;
1270 }
1271
1272 /* We either got finished input or we have to wait for another call. */
1273 if (isspace(ch) || !ch) {
1274 parser->buffer[parser->idx] = 0;
1275 parser->cont = false;
1276 } else if (parser->idx < parser->size - 1) {
1277 parser->cont = true;
1278 parser->buffer[parser->idx++] = ch;
1279 /* Make sure the parsed string always terminates with '\0'. */
1280 parser->buffer[parser->idx] = 0;
1281 } else {
1282 ret = -EINVAL;
1283 goto out;
1284 }
1285
1286 *ppos += read;
1287 ret = read;
1288
1289 out:
1290 return ret;
1291 }
1292
1293 /* TODO add a seq_buf_to_buffer() */
1294 static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
1295 {
1296 int len;
1297
1298 if (trace_seq_used(s) <= s->seq.readpos)
1299 return -EBUSY;
1300
1301 len = trace_seq_used(s) - s->seq.readpos;
1302 if (cnt > len)
1303 cnt = len;
1304 memcpy(buf, s->buffer + s->seq.readpos, cnt);
1305
1306 s->seq.readpos += cnt;
1307 return cnt;
1308 }
1309
1310 unsigned long __read_mostly tracing_thresh;
1311
1312 #ifdef CONFIG_TRACER_MAX_TRACE
1313 /*
1314 * Copy the new maximum trace into the separate maximum-trace
1315 * structure. (this way the maximum trace is permanently saved,
1316 * for later retrieval via /sys/kernel/tracing/tracing_max_latency)
1317 */
1318 static void
1319 __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1320 {
1321 struct trace_buffer *trace_buf = &tr->trace_buffer;
1322 struct trace_buffer *max_buf = &tr->max_buffer;
1323 struct trace_array_cpu *data = per_cpu_ptr(trace_buf->data, cpu);
1324 struct trace_array_cpu *max_data = per_cpu_ptr(max_buf->data, cpu);
1325
1326 max_buf->cpu = cpu;
1327 max_buf->time_start = data->preempt_timestamp;
1328
1329 max_data->saved_latency = tr->max_latency;
1330 max_data->critical_start = data->critical_start;
1331 max_data->critical_end = data->critical_end;
1332
1333 memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
1334 max_data->pid = tsk->pid;
1335 /*
1336 * If tsk == current, then use current_uid(), as that does not use
1337 * RCU. The irq tracer can be called out of RCU scope.
1338 */
1339 if (tsk == current)
1340 max_data->uid = current_uid();
1341 else
1342 max_data->uid = task_uid(tsk);
1343
1344 max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
1345 max_data->policy = tsk->policy;
1346 max_data->rt_priority = tsk->rt_priority;
1347
1348 /* record this tasks comm */
1349 tracing_record_cmdline(tsk);
1350 }
1351
1352 /**
1353 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
1354 * @tr: tracer
1355 * @tsk: the task with the latency
1356 * @cpu: The cpu that initiated the trace.
1357 *
1358 * Flip the buffers between the @tr and the max_tr and record information
1359 * about which task was the cause of this latency.
1360 */
1361 void
1362 update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
1363 {
1364 if (tr->stop_count)
1365 return;
1366
1367 WARN_ON_ONCE(!irqs_disabled());
1368
1369 if (!tr->allocated_snapshot) {
1370 /* Only the nop tracer should hit this when disabling */
1371 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1372 return;
1373 }
1374
1375 arch_spin_lock(&tr->max_lock);
1376
1377 /* Inherit the recordable setting from trace_buffer */
1378 if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
1379 ring_buffer_record_on(tr->max_buffer.buffer);
1380 else
1381 ring_buffer_record_off(tr->max_buffer.buffer);
1382
1383 swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
1384
1385 __update_max_tr(tr, tsk, cpu);
1386 arch_spin_unlock(&tr->max_lock);
1387 }
1388
1389 /**
1390 * update_max_tr_single - only copy one trace over, and reset the rest
1391 * @tr - tracer
1392 * @tsk - task with the latency
1393 * @cpu - the cpu of the buffer to copy.
1394 *
1395 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
1396 */
1397 void
1398 update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1399 {
1400 int ret;
1401
1402 if (tr->stop_count)
1403 return;
1404
1405 WARN_ON_ONCE(!irqs_disabled());
1406 if (!tr->allocated_snapshot) {
1407 /* Only the nop tracer should hit this when disabling */
1408 WARN_ON_ONCE(tr->current_trace != &nop_trace);
1409 return;
1410 }
1411
1412 arch_spin_lock(&tr->max_lock);
1413
1414 ret = ring_buffer_swap_cpu(tr->max_buffer.buffer, tr->trace_buffer.buffer, cpu);
1415
1416 if (ret == -EBUSY) {
1417 /*
1418 * We failed to swap the buffer due to a commit taking
1419 * place on this CPU. We fail to record, but we reset
1420 * the max trace buffer (no one writes directly to it)
1421 * and flag that it failed.
1422 */
1423 trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_,
1424 "Failed to swap buffers due to commit in progress\n");
1425 }
1426
1427 WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);
1428
1429 __update_max_tr(tr, tsk, cpu);
1430 arch_spin_unlock(&tr->max_lock);
1431 }
1432 #endif /* CONFIG_TRACER_MAX_TRACE */
1433
1434 static int wait_on_pipe(struct trace_iterator *iter, int full)
1435 {
1436 /* Iterators are static, they should be filled or empty */
1437 if (trace_buffer_iter(iter, iter->cpu_file))
1438 return 0;
1439
1440 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1441 full);
1442 }
1443
1444 #ifdef CONFIG_FTRACE_STARTUP_TEST
1445 static bool selftests_can_run;
1446
1447 struct trace_selftests {
1448 struct list_head list;
1449 struct tracer *type;
1450 };
1451
1452 static LIST_HEAD(postponed_selftests);
1453
1454 static int save_selftest(struct tracer *type)
1455 {
1456 struct trace_selftests *selftest;
1457
1458 selftest = kmalloc(sizeof(*selftest), GFP_KERNEL);
1459 if (!selftest)
1460 return -ENOMEM;
1461
1462 selftest->type = type;
1463 list_add(&selftest->list, &postponed_selftests);
1464 return 0;
1465 }
1466
1467 static int run_tracer_selftest(struct tracer *type)
1468 {
1469 struct trace_array *tr = &global_trace;
1470 struct tracer *saved_tracer = tr->current_trace;
1471 int ret;
1472
1473 if (!type->selftest || tracing_selftest_disabled)
1474 return 0;
1475
1476 /*
1477 * If a tracer registers early in boot up (before scheduling is
1478 * initialized and such), then do not run its selftests yet.
1479 * Instead, run it a little later in the boot process.
1480 */
1481 if (!selftests_can_run)
1482 return save_selftest(type);
1483
1484 /*
1485 * Run a selftest on this tracer.
1486 * Here we reset the trace buffer, and set the current
1487 * tracer to be this tracer. The tracer can then run some
1488 * internal tracing to verify that everything is in order.
1489 * If we fail, we do not register this tracer.
1490 */
1491 tracing_reset_online_cpus(&tr->trace_buffer);
1492
1493 tr->current_trace = type;
1494
1495 #ifdef CONFIG_TRACER_MAX_TRACE
1496 if (type->use_max_tr) {
1497 /* If we expanded the buffers, make sure the max is expanded too */
1498 if (ring_buffer_expanded)
1499 ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
1500 RING_BUFFER_ALL_CPUS);
1501 tr->allocated_snapshot = true;
1502 }
1503 #endif
1504
1505 /* the test is responsible for initializing and enabling */
1506 pr_info("Testing tracer %s: ", type->name);
1507 ret = type->selftest(type, tr);
1508 /* the test is responsible for resetting too */
1509 tr->current_trace = saved_tracer;
1510 if (ret) {
1511 printk(KERN_CONT "FAILED!\n");
1512 /* Add the warning after printing 'FAILED' */
1513 WARN_ON(1);
1514 return -1;
1515 }
1516 /* Only reset on passing, to avoid touching corrupted buffers */
1517 tracing_reset_online_cpus(&tr->trace_buffer);
1518
1519 #ifdef CONFIG_TRACER_MAX_TRACE
1520 if (type->use_max_tr) {
1521 tr->allocated_snapshot = false;
1522
1523 /* Shrink the max buffer again */
1524 if (ring_buffer_expanded)
1525 ring_buffer_resize(tr->max_buffer.buffer, 1,
1526 RING_BUFFER_ALL_CPUS);
1527 }
1528 #endif
1529
1530 printk(KERN_CONT "PASSED\n");
1531 return 0;
1532 }
1533
1534 static __init int init_trace_selftests(void)
1535 {
1536 struct trace_selftests *p, *n;
1537 struct tracer *t, **last;
1538 int ret;
1539
1540 selftests_can_run = true;
1541
1542 mutex_lock(&trace_types_lock);
1543
1544 if (list_empty(&postponed_selftests))
1545 goto out;
1546
1547 pr_info("Running postponed tracer tests:\n");
1548
1549 list_for_each_entry_safe(p, n, &postponed_selftests, list) {
1550 ret = run_tracer_selftest(p->type);
1551 /* If the test fails, then warn and remove from available_tracers */
1552 if (ret < 0) {
1553 WARN(1, "tracer: %s failed selftest, disabling\n",
1554 p->type->name);
1555 last = &trace_types;
1556 for (t = trace_types; t; t = t->next) {
1557 if (t == p->type) {
1558 *last = t->next;
1559 break;
1560 }
1561 last = &t->next;
1562 }
1563 }
1564 list_del(&p->list);
1565 kfree(p);
1566 }
1567
1568 out:
1569 mutex_unlock(&trace_types_lock);
1570
1571 return 0;
1572 }
1573 core_initcall(init_trace_selftests);
1574 #else
1575 static inline int run_tracer_selftest(struct tracer *type)
1576 {
1577 return 0;
1578 }
1579 #endif /* CONFIG_FTRACE_STARTUP_TEST */
1580
1581 static void add_tracer_options(struct trace_array *tr, struct tracer *t);
1582
1583 static void __init apply_trace_boot_options(void);
1584
1585 /**
1586 * register_tracer - register a tracer with the ftrace system.
1587 * @type - the plugin for the tracer
1588 *
1589 * Register a new plugin tracer.
1590 */
1591 int __init register_tracer(struct tracer *type)
1592 {
1593 struct tracer *t;
1594 int ret = 0;
1595
1596 if (!type->name) {
1597 pr_info("Tracer must have a name\n");
1598 return -1;
1599 }
1600
1601 if (strlen(type->name) >= MAX_TRACER_SIZE) {
1602 pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
1603 return -1;
1604 }
1605
1606 mutex_lock(&trace_types_lock);
1607
1608 tracing_selftest_running = true;
1609
1610 for (t = trace_types; t; t = t->next) {
1611 if (strcmp(type->name, t->name) == 0) {
1612 /* already found */
1613 pr_info("Tracer %s already registered\n",
1614 type->name);
1615 ret = -1;
1616 goto out;
1617 }
1618 }
1619
1620 if (!type->set_flag)
1621 type->set_flag = &dummy_set_flag;
1622 if (!type->flags) {
1623 /*allocate a dummy tracer_flags*/
1624 type->flags = kmalloc(sizeof(*type->flags), GFP_KERNEL);
1625 if (!type->flags) {
1626 ret = -ENOMEM;
1627 goto out;
1628 }
1629 type->flags->val = 0;
1630 type->flags->opts = dummy_tracer_opt;
1631 } else
1632 if (!type->flags->opts)
1633 type->flags->opts = dummy_tracer_opt;
1634
1635 /* store the tracer for __set_tracer_option */
1636 type->flags->trace = type;
1637
1638 ret = run_tracer_selftest(type);
1639 if (ret < 0)
1640 goto out;
1641
1642 type->next = trace_types;
1643 trace_types = type;
1644 add_tracer_options(&global_trace, type);
1645
1646 out:
1647 tracing_selftest_running = false;
1648 mutex_unlock(&trace_types_lock);
1649
1650 if (ret || !default_bootup_tracer)
1651 goto out_unlock;
1652
1653 if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
1654 goto out_unlock;
1655
1656 printk(KERN_INFO "Starting tracer '%s'\n", type->name);
1657 /* Do we want this tracer to start on bootup? */
1658 tracing_set_tracer(&global_trace, type->name);
1659 default_bootup_tracer = NULL;
1660
1661 apply_trace_boot_options();
1662
1663 /* disable other selftests, since this will break it. */
1664 tracing_selftest_disabled = true;
1665 #ifdef CONFIG_FTRACE_STARTUP_TEST
1666 printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
1667 type->name);
1668 #endif
1669
1670 out_unlock:
1671 return ret;
1672 }
1673
1674 void tracing_reset(struct trace_buffer *buf, int cpu)
1675 {
1676 struct ring_buffer *buffer = buf->buffer;
1677
1678 if (!buffer)
1679 return;
1680
1681 ring_buffer_record_disable(buffer);
1682
1683 /* Make sure all commits have finished */
1684 synchronize_sched();
1685 ring_buffer_reset_cpu(buffer, cpu);
1686
1687 ring_buffer_record_enable(buffer);
1688 }
1689
1690 void tracing_reset_online_cpus(struct trace_buffer *buf)
1691 {
1692 struct ring_buffer *buffer = buf->buffer;
1693 int cpu;
1694
1695 if (!buffer)
1696 return;
1697
1698 ring_buffer_record_disable(buffer);
1699
1700 /* Make sure all commits have finished */
1701 synchronize_sched();
1702
1703 buf->time_start = buffer_ftrace_now(buf, buf->cpu);
1704
1705 for_each_online_cpu(cpu)
1706 ring_buffer_reset_cpu(buffer, cpu);
1707
1708 ring_buffer_record_enable(buffer);
1709 }
1710
1711 /* Must have trace_types_lock held */
1712 void tracing_reset_all_online_cpus(void)
1713 {
1714 struct trace_array *tr;
1715
1716 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
1717 if (!tr->clear_trace)
1718 continue;
1719 tr->clear_trace = false;
1720 tracing_reset_online_cpus(&tr->trace_buffer);
1721 #ifdef CONFIG_TRACER_MAX_TRACE
1722 tracing_reset_online_cpus(&tr->max_buffer);
1723 #endif
1724 }
1725 }
1726
1727 static int *tgid_map;
1728
1729 #define SAVED_CMDLINES_DEFAULT 128
1730 #define NO_CMDLINE_MAP UINT_MAX
1731 static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
1732 struct saved_cmdlines_buffer {
1733 unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
1734 unsigned *map_cmdline_to_pid;
1735 unsigned cmdline_num;
1736 int cmdline_idx;
1737 char *saved_cmdlines;
1738 };
1739 static struct saved_cmdlines_buffer *savedcmd;
1740
1741 /* temporary disable recording */
1742 static atomic_t trace_record_taskinfo_disabled __read_mostly;
1743
1744 static inline char *get_saved_cmdlines(int idx)
1745 {
1746 return &savedcmd->saved_cmdlines[idx * TASK_COMM_LEN];
1747 }
1748
1749 static inline void set_cmdline(int idx, const char *cmdline)
1750 {
1751 memcpy(get_saved_cmdlines(idx), cmdline, TASK_COMM_LEN);
1752 }
1753
1754 static int allocate_cmdlines_buffer(unsigned int val,
1755 struct saved_cmdlines_buffer *s)
1756 {
1757 s->map_cmdline_to_pid = kmalloc_array(val,
1758 sizeof(*s->map_cmdline_to_pid),
1759 GFP_KERNEL);
1760 if (!s->map_cmdline_to_pid)
1761 return -ENOMEM;
1762
1763 s->saved_cmdlines = kmalloc_array(TASK_COMM_LEN, val, GFP_KERNEL);
1764 if (!s->saved_cmdlines) {
1765 kfree(s->map_cmdline_to_pid);
1766 return -ENOMEM;
1767 }
1768
1769 s->cmdline_idx = 0;
1770 s->cmdline_num = val;
1771 memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
1772 sizeof(s->map_pid_to_cmdline));
1773 memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
1774 val * sizeof(*s->map_cmdline_to_pid));
1775
1776 return 0;
1777 }
1778
1779 static int trace_create_savedcmd(void)
1780 {
1781 int ret;
1782
1783 savedcmd = kmalloc(sizeof(*savedcmd), GFP_KERNEL);
1784 if (!savedcmd)
1785 return -ENOMEM;
1786
1787 ret = allocate_cmdlines_buffer(SAVED_CMDLINES_DEFAULT, savedcmd);
1788 if (ret < 0) {
1789 kfree(savedcmd);
1790 savedcmd = NULL;
1791 return -ENOMEM;
1792 }
1793
1794 return 0;
1795 }
1796
1797 int is_tracing_stopped(void)
1798 {
1799 return global_trace.stop_count;
1800 }
1801
1802 /**
1803 * tracing_start - quick start of the tracer
1804 *
1805 * If tracing is enabled but was stopped by tracing_stop,
1806 * this will start the tracer back up.
1807 */
1808 void tracing_start(void)
1809 {
1810 struct ring_buffer *buffer;
1811 unsigned long flags;
1812
1813 if (tracing_disabled)
1814 return;
1815
1816 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1817 if (--global_trace.stop_count) {
1818 if (global_trace.stop_count < 0) {
1819 /* Someone screwed up their debugging */
1820 WARN_ON_ONCE(1);
1821 global_trace.stop_count = 0;
1822 }
1823 goto out;
1824 }
1825
1826 /* Prevent the buffers from switching */
1827 arch_spin_lock(&global_trace.max_lock);
1828
1829 buffer = global_trace.trace_buffer.buffer;
1830 if (buffer)
1831 ring_buffer_record_enable(buffer);
1832
1833 #ifdef CONFIG_TRACER_MAX_TRACE
1834 buffer = global_trace.max_buffer.buffer;
1835 if (buffer)
1836 ring_buffer_record_enable(buffer);
1837 #endif
1838
1839 arch_spin_unlock(&global_trace.max_lock);
1840
1841 out:
1842 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1843 }
1844
1845 static void tracing_start_tr(struct trace_array *tr)
1846 {
1847 struct ring_buffer *buffer;
1848 unsigned long flags;
1849
1850 if (tracing_disabled)
1851 return;
1852
1853 /* If global, we need to also start the max tracer */
1854 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1855 return tracing_start();
1856
1857 raw_spin_lock_irqsave(&tr->start_lock, flags);
1858
1859 if (--tr->stop_count) {
1860 if (tr->stop_count < 0) {
1861 /* Someone screwed up their debugging */
1862 WARN_ON_ONCE(1);
1863 tr->stop_count = 0;
1864 }
1865 goto out;
1866 }
1867
1868 buffer = tr->trace_buffer.buffer;
1869 if (buffer)
1870 ring_buffer_record_enable(buffer);
1871
1872 out:
1873 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1874 }
1875
1876 /**
1877 * tracing_stop - quick stop of the tracer
1878 *
1879 * Light weight way to stop tracing. Use in conjunction with
1880 * tracing_start.
1881 */
1882 void tracing_stop(void)
1883 {
1884 struct ring_buffer *buffer;
1885 unsigned long flags;
1886
1887 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1888 if (global_trace.stop_count++)
1889 goto out;
1890
1891 /* Prevent the buffers from switching */
1892 arch_spin_lock(&global_trace.max_lock);
1893
1894 buffer = global_trace.trace_buffer.buffer;
1895 if (buffer)
1896 ring_buffer_record_disable(buffer);
1897
1898 #ifdef CONFIG_TRACER_MAX_TRACE
1899 buffer = global_trace.max_buffer.buffer;
1900 if (buffer)
1901 ring_buffer_record_disable(buffer);
1902 #endif
1903
1904 arch_spin_unlock(&global_trace.max_lock);
1905
1906 out:
1907 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1908 }
1909
1910 static void tracing_stop_tr(struct trace_array *tr)
1911 {
1912 struct ring_buffer *buffer;
1913 unsigned long flags;
1914
1915 /* If global, we need to also stop the max tracer */
1916 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
1917 return tracing_stop();
1918
1919 raw_spin_lock_irqsave(&tr->start_lock, flags);
1920 if (tr->stop_count++)
1921 goto out;
1922
1923 buffer = tr->trace_buffer.buffer;
1924 if (buffer)
1925 ring_buffer_record_disable(buffer);
1926
1927 out:
1928 raw_spin_unlock_irqrestore(&tr->start_lock, flags);
1929 }
1930
1931 static int trace_save_cmdline(struct task_struct *tsk)
1932 {
1933 unsigned pid, idx;
1934
1935 /* treat recording of idle task as a success */
1936 if (!tsk->pid)
1937 return 1;
1938
1939 if (unlikely(tsk->pid > PID_MAX_DEFAULT))
1940 return 0;
1941
1942 /*
1943 * It's not the end of the world if we don't get
1944 * the lock, but we also don't want to spin
1945 * nor do we want to disable interrupts,
1946 * so if we miss here, then better luck next time.
1947 */
1948 if (!arch_spin_trylock(&trace_cmdline_lock))
1949 return 0;
1950
1951 idx = savedcmd->map_pid_to_cmdline[tsk->pid];
1952 if (idx == NO_CMDLINE_MAP) {
1953 idx = (savedcmd->cmdline_idx + 1) % savedcmd->cmdline_num;
1954
1955 /*
1956 * Check whether the cmdline buffer at idx has a pid
1957 * mapped. We are going to overwrite that entry so we
1958 * need to clear the map_pid_to_cmdline. Otherwise we
1959 * would read the new comm for the old pid.
1960 */
1961 pid = savedcmd->map_cmdline_to_pid[idx];
1962 if (pid != NO_CMDLINE_MAP)
1963 savedcmd->map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;
1964
1965 savedcmd->map_cmdline_to_pid[idx] = tsk->pid;
1966 savedcmd->map_pid_to_cmdline[tsk->pid] = idx;
1967
1968 savedcmd->cmdline_idx = idx;
1969 }
1970
1971 set_cmdline(idx, tsk->comm);
1972
1973 arch_spin_unlock(&trace_cmdline_lock);
1974
1975 return 1;
1976 }
1977
1978 static void __trace_find_cmdline(int pid, char comm[])
1979 {
1980 unsigned map;
1981
1982 if (!pid) {
1983 strcpy(comm, "<idle>");
1984 return;
1985 }
1986
1987 if (WARN_ON_ONCE(pid < 0)) {
1988 strcpy(comm, "<XXX>");
1989 return;
1990 }
1991
1992 if (pid > PID_MAX_DEFAULT) {
1993 strcpy(comm, "<...>");
1994 return;
1995 }
1996
1997 map = savedcmd->map_pid_to_cmdline[pid];
1998 if (map != NO_CMDLINE_MAP)
1999 strlcpy(comm, get_saved_cmdlines(map), TASK_COMM_LEN);
2000 else
2001 strcpy(comm, "<...>");
2002 }
2003
2004 void trace_find_cmdline(int pid, char comm[])
2005 {
2006 preempt_disable();
2007 arch_spin_lock(&trace_cmdline_lock);
2008
2009 __trace_find_cmdline(pid, comm);
2010
2011 arch_spin_unlock(&trace_cmdline_lock);
2012 preempt_enable();
2013 }
2014
2015 int trace_find_tgid(int pid)
2016 {
2017 if (unlikely(!tgid_map || !pid || pid > PID_MAX_DEFAULT))
2018 return 0;
2019
2020 return tgid_map[pid];
2021 }
2022
2023 static int trace_save_tgid(struct task_struct *tsk)
2024 {
2025 /* treat recording of idle task as a success */
2026 if (!tsk->pid)
2027 return 1;
2028
2029 if (unlikely(!tgid_map || tsk->pid > PID_MAX_DEFAULT))
2030 return 0;
2031
2032 tgid_map[tsk->pid] = tsk->tgid;
2033 return 1;
2034 }
2035
2036 static bool tracing_record_taskinfo_skip(int flags)
2037 {
2038 if (unlikely(!(flags & (TRACE_RECORD_CMDLINE | TRACE_RECORD_TGID))))
2039 return true;
2040 if (atomic_read(&trace_record_taskinfo_disabled) || !tracing_is_on())
2041 return true;
2042 if (!__this_cpu_read(trace_taskinfo_save))
2043 return true;
2044 return false;
2045 }
2046
2047 /**
2048 * tracing_record_taskinfo - record the task info of a task
2049 *
2050 * @task - task to record
2051 * @flags - TRACE_RECORD_CMDLINE for recording comm
2052 * - TRACE_RECORD_TGID for recording tgid
2053 */
2054 void tracing_record_taskinfo(struct task_struct *task, int flags)
2055 {
2056 bool done;
2057
2058 if (tracing_record_taskinfo_skip(flags))
2059 return;
2060
2061 /*
2062 * Record as much task information as possible. If some fail, continue
2063 * to try to record the others.
2064 */
2065 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(task);
2066 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(task);
2067
2068 /* If recording any information failed, retry again soon. */
2069 if (!done)
2070 return;
2071
2072 __this_cpu_write(trace_taskinfo_save, false);
2073 }
2074
2075 /**
2076 * tracing_record_taskinfo_sched_switch - record task info for sched_switch
2077 *
2078 * @prev - previous task during sched_switch
2079 * @next - next task during sched_switch
2080 * @flags - TRACE_RECORD_CMDLINE for recording comm
2081 * TRACE_RECORD_TGID for recording tgid
2082 */
2083 void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
2084 struct task_struct *next, int flags)
2085 {
2086 bool done;
2087
2088 if (tracing_record_taskinfo_skip(flags))
2089 return;
2090
2091 /*
2092 * Record as much task information as possible. If some fail, continue
2093 * to try to record the others.
2094 */
2095 done = !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(prev);
2096 done &= !(flags & TRACE_RECORD_CMDLINE) || trace_save_cmdline(next);
2097 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(prev);
2098 done &= !(flags & TRACE_RECORD_TGID) || trace_save_tgid(next);
2099
2100 /* If recording any information failed, retry again soon. */
2101 if (!done)
2102 return;
2103
2104 __this_cpu_write(trace_taskinfo_save, false);
2105 }
2106
2107 /* Helpers to record a specific task information */
2108 void tracing_record_cmdline(struct task_struct *task)
2109 {
2110 tracing_record_taskinfo(task, TRACE_RECORD_CMDLINE);
2111 }
2112
2113 void tracing_record_tgid(struct task_struct *task)
2114 {
2115 tracing_record_taskinfo(task, TRACE_RECORD_TGID);
2116 }
2117
2118 /*
2119 * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
2120 * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
2121 * simplifies those functions and keeps them in sync.
2122 */
2123 enum print_line_t trace_handle_return(struct trace_seq *s)
2124 {
2125 return trace_seq_has_overflowed(s) ?
2126 TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
2127 }
2128 EXPORT_SYMBOL_GPL(trace_handle_return);
2129
2130 void
2131 tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
2132 int pc)
2133 {
2134 struct task_struct *tsk = current;
2135
2136 entry->preempt_count = pc & 0xff;
2137 entry->pid = (tsk) ? tsk->pid : 0;
2138 entry->flags =
2139 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
2140 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
2141 #else
2142 TRACE_FLAG_IRQS_NOSUPPORT |
2143 #endif
2144 ((pc & NMI_MASK ) ? TRACE_FLAG_NMI : 0) |
2145 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
2146 ((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
2147 (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
2148 (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
2149 }
2150 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
2151
2152 struct ring_buffer_event *
2153 trace_buffer_lock_reserve(struct ring_buffer *buffer,
2154 int type,
2155 unsigned long len,
2156 unsigned long flags, int pc)
2157 {
2158 return __trace_buffer_lock_reserve(buffer, type, len, flags, pc);
2159 }
2160
2161 DEFINE_PER_CPU(struct ring_buffer_event *, trace_buffered_event);
2162 DEFINE_PER_CPU(int, trace_buffered_event_cnt);
2163 static int trace_buffered_event_ref;
2164
2165 /**
2166 * trace_buffered_event_enable - enable buffering events
2167 *
2168 * When events are being filtered, it is quicker to use a temporary
2169 * buffer to write the event data into if there's a likely chance
2170 * that it will not be committed. The discard of the ring buffer
2171 * is not as fast as committing, and is much slower than copying
2172 * a commit.
2173 *
2174 * When an event is to be filtered, allocate per cpu buffers to
2175 * write the event data into, and if the event is filtered and discarded
2176 * it is simply dropped, otherwise, the entire data is to be committed
2177 * in one shot.
2178 */
2179 void trace_buffered_event_enable(void)
2180 {
2181 struct ring_buffer_event *event;
2182 struct page *page;
2183 int cpu;
2184
2185 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2186
2187 if (trace_buffered_event_ref++)
2188 return;
2189
2190 for_each_tracing_cpu(cpu) {
2191 page = alloc_pages_node(cpu_to_node(cpu),
2192 GFP_KERNEL | __GFP_NORETRY, 0);
2193 if (!page)
2194 goto failed;
2195
2196 event = page_address(page);
2197 memset(event, 0, sizeof(*event));
2198
2199 per_cpu(trace_buffered_event, cpu) = event;
2200
2201 preempt_disable();
2202 if (cpu == smp_processor_id() &&
2203 this_cpu_read(trace_buffered_event) !=
2204 per_cpu(trace_buffered_event, cpu))
2205 WARN_ON_ONCE(1);
2206 preempt_enable();
2207 }
2208
2209 return;
2210 failed:
2211 trace_buffered_event_disable();
2212 }
2213
2214 static void enable_trace_buffered_event(void *data)
2215 {
2216 /* Probably not needed, but do it anyway */
2217 smp_rmb();
2218 this_cpu_dec(trace_buffered_event_cnt);
2219 }
2220
2221 static void disable_trace_buffered_event(void *data)
2222 {
2223 this_cpu_inc(trace_buffered_event_cnt);
2224 }
2225
2226 /**
2227 * trace_buffered_event_disable - disable buffering events
2228 *
2229 * When a filter is removed, it is faster to not use the buffered
2230 * events, and to commit directly into the ring buffer. Free up
2231 * the temp buffers when there are no more users. This requires
2232 * special synchronization with current events.
2233 */
2234 void trace_buffered_event_disable(void)
2235 {
2236 int cpu;
2237
2238 WARN_ON_ONCE(!mutex_is_locked(&event_mutex));
2239
2240 if (WARN_ON_ONCE(!trace_buffered_event_ref))
2241 return;
2242
2243 if (--trace_buffered_event_ref)
2244 return;
2245
2246 preempt_disable();
2247 /* For each CPU, set the buffer as used. */
2248 smp_call_function_many(tracing_buffer_mask,
2249 disable_trace_buffered_event, NULL, 1);
2250 preempt_enable();
2251
2252 /* Wait for all current users to finish */
2253 synchronize_sched();
2254
2255 for_each_tracing_cpu(cpu) {
2256 free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
2257 per_cpu(trace_buffered_event, cpu) = NULL;
2258 }
2259 /*
2260 * Make sure trace_buffered_event is NULL before clearing
2261 * trace_buffered_event_cnt.
2262 */
2263 smp_wmb();
2264
2265 preempt_disable();
2266 /* Do the work on each cpu */
2267 smp_call_function_many(tracing_buffer_mask,
2268 enable_trace_buffered_event, NULL, 1);
2269 preempt_enable();
2270 }
2271
2272 static struct ring_buffer *temp_buffer;
2273
2274 struct ring_buffer_event *
2275 trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
2276 struct trace_event_file *trace_file,
2277 int type, unsigned long len,
2278 unsigned long flags, int pc)
2279 {
2280 struct ring_buffer_event *entry;
2281 int val;
2282
2283 *current_rb = trace_file->tr->trace_buffer.buffer;
2284
2285 if (!ring_buffer_time_stamp_abs(*current_rb) && (trace_file->flags &
2286 (EVENT_FILE_FL_SOFT_DISABLED | EVENT_FILE_FL_FILTERED)) &&
2287 (entry = this_cpu_read(trace_buffered_event))) {
2288 /* Try to use the per cpu buffer first */
2289 val = this_cpu_inc_return(trace_buffered_event_cnt);
2290 if (val == 1) {
2291 trace_event_setup(entry, type, flags, pc);
2292 entry->array[0] = len;
2293 return entry;
2294 }
2295 this_cpu_dec(trace_buffered_event_cnt);
2296 }
2297
2298 entry = __trace_buffer_lock_reserve(*current_rb,
2299 type, len, flags, pc);
2300 /*
2301 * If tracing is off, but we have triggers enabled
2302 * we still need to look at the event data. Use the temp_buffer
2303 * to store the trace event for the tigger to use. It's recusive
2304 * safe and will not be recorded anywhere.
2305 */
2306 if (!entry && trace_file->flags & EVENT_FILE_FL_TRIGGER_COND) {
2307 *current_rb = temp_buffer;
2308 entry = __trace_buffer_lock_reserve(*current_rb,
2309 type, len, flags, pc);
2310 }
2311 return entry;
2312 }
2313 EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
2314
2315 static DEFINE_SPINLOCK(tracepoint_iter_lock);
2316 static DEFINE_MUTEX(tracepoint_printk_mutex);
2317
2318 static void output_printk(struct trace_event_buffer *fbuffer)
2319 {
2320 struct trace_event_call *event_call;
2321 struct trace_event *event;
2322 unsigned long flags;
2323 struct trace_iterator *iter = tracepoint_print_iter;
2324
2325 /* We should never get here if iter is NULL */
2326 if (WARN_ON_ONCE(!iter))
2327 return;
2328
2329 event_call = fbuffer->trace_file->event_call;
2330 if (!event_call || !event_call->event.funcs ||
2331 !event_call->event.funcs->trace)
2332 return;
2333
2334 event = &fbuffer->trace_file->event_call->event;
2335
2336 spin_lock_irqsave(&tracepoint_iter_lock, flags);
2337 trace_seq_init(&iter->seq);
2338 iter->ent = fbuffer->entry;
2339 event_call->event.funcs->trace(iter, 0, event);
2340 trace_seq_putc(&iter->seq, 0);
2341 printk("%s", iter->seq.buffer);
2342
2343 spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
2344 }
2345
2346 int tracepoint_printk_sysctl(struct ctl_table *table, int write,
2347 void __user *buffer, size_t *lenp,
2348 loff_t *ppos)
2349 {
2350 int save_tracepoint_printk;
2351 int ret;
2352
2353 mutex_lock(&tracepoint_printk_mutex);
2354 save_tracepoint_printk = tracepoint_printk;
2355
2356 ret = proc_dointvec(table, write, buffer, lenp, ppos);
2357
2358 /*
2359 * This will force exiting early, as tracepoint_printk
2360 * is always zero when tracepoint_printk_iter is not allocated
2361 */
2362 if (!tracepoint_print_iter)
2363 tracepoint_printk = 0;
2364
2365 if (save_tracepoint_printk == tracepoint_printk)
2366 goto out;
2367
2368 if (tracepoint_printk)
2369 static_key_enable(&tracepoint_printk_key.key);
2370 else
2371 static_key_disable(&tracepoint_printk_key.key);
2372
2373 out:
2374 mutex_unlock(&tracepoint_printk_mutex);
2375
2376 return ret;
2377 }
2378
2379 void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
2380 {
2381 if (static_key_false(&tracepoint_printk_key.key))
2382 output_printk(fbuffer);
2383
2384 event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
2385 fbuffer->event, fbuffer->entry,
2386 fbuffer->flags, fbuffer->pc);
2387 }
2388 EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
2389
2390 /*
2391 * Skip 3:
2392 *
2393 * trace_buffer_unlock_commit_regs()
2394 * trace_event_buffer_commit()
2395 * trace_event_raw_event_xxx()
2396 */
2397 # define STACK_SKIP 3
2398
2399 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
2400 struct ring_buffer *buffer,
2401 struct ring_buffer_event *event,
2402 unsigned long flags, int pc,
2403 struct pt_regs *regs)
2404 {
2405 __buffer_unlock_commit(buffer, event);
2406
2407 /*
2408 * If regs is not set, then skip the necessary functions.
2409 * Note, we can still get here via blktrace, wakeup tracer
2410 * and mmiotrace, but that's ok if they lose a function or
2411 * two. They are not that meaningful.
2412 */
2413 ftrace_trace_stack(tr, buffer, flags, regs ? 0 : STACK_SKIP, pc, regs);
2414 ftrace_trace_userstack(buffer, flags, pc);
2415 }
2416
2417 /*
2418 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
2419 */
2420 void
2421 trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
2422 struct ring_buffer_event *event)
2423 {
2424 __buffer_unlock_commit(buffer, event);
2425 }
2426
2427 static void
2428 trace_process_export(struct trace_export *export,
2429 struct ring_buffer_event *event)
2430 {
2431 struct trace_entry *entry;
2432 unsigned int size = 0;
2433
2434 entry = ring_buffer_event_data(event);
2435 size = ring_buffer_event_length(event);
2436 export->write(export, entry, size);
2437 }
2438
2439 static DEFINE_MUTEX(ftrace_export_lock);
2440
2441 static struct trace_export __rcu *ftrace_exports_list __read_mostly;
2442
2443 static DEFINE_STATIC_KEY_FALSE(ftrace_exports_enabled);
2444
2445 static inline void ftrace_exports_enable(void)
2446 {
2447 static_branch_enable(&ftrace_exports_enabled);
2448 }
2449
2450 static inline void ftrace_exports_disable(void)
2451 {
2452 static_branch_disable(&ftrace_exports_enabled);
2453 }
2454
2455 void ftrace_exports(struct ring_buffer_event *event)
2456 {
2457 struct trace_export *export;
2458
2459 preempt_disable_notrace();
2460
2461 export = rcu_dereference_raw_notrace(ftrace_exports_list);
2462 while (export) {
2463 trace_process_export(export, event);
2464 export = rcu_dereference_raw_notrace(export->next);
2465 }
2466
2467 preempt_enable_notrace();
2468 }
2469
2470 static inline void
2471 add_trace_export(struct trace_export **list, struct trace_export *export)
2472 {
2473 rcu_assign_pointer(export->next, *list);
2474 /*
2475 * We are entering export into the list but another
2476 * CPU might be walking that list. We need to make sure
2477 * the export->next pointer is valid before another CPU sees
2478 * the export pointer included into the list.
2479 */
2480 rcu_assign_pointer(*list, export);
2481 }
2482
2483 static inline int
2484 rm_trace_export(struct trace_export **list, struct trace_export *export)
2485 {
2486 struct trace_export **p;
2487
2488 for (p = list; *p != NULL; p = &(*p)->next)
2489 if (*p == export)
2490 break;
2491
2492 if (*p != export)
2493 return -1;
2494
2495 rcu_assign_pointer(*p, (*p)->next);
2496
2497 return 0;
2498 }
2499
2500 static inline void
2501 add_ftrace_export(struct trace_export **list, struct trace_export *export)
2502 {
2503 if (*list == NULL)
2504 ftrace_exports_enable();
2505
2506 add_trace_export(list, export);
2507 }
2508
2509 static inline int
2510 rm_ftrace_export(struct trace_export **list, struct trace_export *export)
2511 {
2512 int ret;
2513
2514 ret = rm_trace_export(list, export);
2515 if (*list == NULL)
2516 ftrace_exports_disable();
2517
2518 return ret;
2519 }
2520
2521 int register_ftrace_export(struct trace_export *export)
2522 {
2523 if (WARN_ON_ONCE(!export->write))
2524 return -1;
2525
2526 mutex_lock(&ftrace_export_lock);
2527
2528 add_ftrace_export(&ftrace_exports_list, export);
2529
2530 mutex_unlock(&ftrace_export_lock);
2531
2532 return 0;
2533 }
2534 EXPORT_SYMBOL_GPL(register_ftrace_export);
2535
2536 int unregister_ftrace_export(struct trace_export *export)
2537 {
2538 int ret;
2539
2540 mutex_lock(&ftrace_export_lock);
2541
2542 ret = rm_ftrace_export(&ftrace_exports_list, export);
2543
2544 mutex_unlock(&ftrace_export_lock);
2545
2546 return ret;
2547 }
2548 EXPORT_SYMBOL_GPL(unregister_ftrace_export);
2549
2550 void
2551 trace_function(struct trace_array *tr,
2552 unsigned long ip, unsigned long parent_ip, unsigned long flags,
2553 int pc)
2554 {
2555 struct trace_event_call *call = &event_function;
2556 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2557 struct ring_buffer_event *event;
2558 struct ftrace_entry *entry;
2559
2560 event = __trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
2561 flags, pc);
2562 if (!event)
2563 return;
2564 entry = ring_buffer_event_data(event);
2565 entry->ip = ip;
2566 entry->parent_ip = parent_ip;
2567
2568 if (!call_filter_check_discard(call, entry, buffer, event)) {
2569 if (static_branch_unlikely(&ftrace_exports_enabled))
2570 ftrace_exports(event);
2571 __buffer_unlock_commit(buffer, event);
2572 }
2573 }
2574
2575 #ifdef CONFIG_STACKTRACE
2576
2577 #define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
2578 struct ftrace_stack {
2579 unsigned long calls[FTRACE_STACK_MAX_ENTRIES];
2580 };
2581
2582 static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
2583 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
2584
2585 static void __ftrace_trace_stack(struct ring_buffer *buffer,
2586 unsigned long flags,
2587 int skip, int pc, struct pt_regs *regs)
2588 {
2589 struct trace_event_call *call = &event_kernel_stack;
2590 struct ring_buffer_event *event;
2591 struct stack_entry *entry;
2592 struct stack_trace trace;
2593 int use_stack;
2594 int size = FTRACE_STACK_ENTRIES;
2595
2596 trace.nr_entries = 0;
2597 trace.skip = skip;
2598
2599 /*
2600 * Add one, for this function and the call to save_stack_trace()
2601 * If regs is set, then these functions will not be in the way.
2602 */
2603 #ifndef CONFIG_UNWINDER_ORC
2604 if (!regs)
2605 trace.skip++;
2606 #endif
2607
2608 /*
2609 * Since events can happen in NMIs there's no safe way to
2610 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
2611 * or NMI comes in, it will just have to use the default
2612 * FTRACE_STACK_SIZE.
2613 */
2614 preempt_disable_notrace();
2615
2616 use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
2617 /*
2618 * We don't need any atomic variables, just a barrier.
2619 * If an interrupt comes in, we don't care, because it would
2620 * have exited and put the counter back to what we want.
2621 * We just need a barrier to keep gcc from moving things
2622 * around.
2623 */
2624 barrier();
2625 if (use_stack == 1) {
2626 trace.entries = this_cpu_ptr(ftrace_stack.calls);
2627 trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
2628
2629 if (regs)
2630 save_stack_trace_regs(regs, &trace);
2631 else
2632 save_stack_trace(&trace);
2633
2634 if (trace.nr_entries > size)
2635 size = trace.nr_entries;
2636 } else
2637 /* From now on, use_stack is a boolean */
2638 use_stack = 0;
2639
2640 size *= sizeof(unsigned long);
2641
2642 event = __trace_buffer_lock_reserve(buffer, TRACE_STACK,
2643 sizeof(*entry) + size, flags, pc);
2644 if (!event)
2645 goto out;
2646 entry = ring_buffer_event_data(event);
2647
2648 memset(&entry->caller, 0, size);
2649
2650 if (use_stack)
2651 memcpy(&entry->caller, trace.entries,
2652 trace.nr_entries * sizeof(unsigned long));
2653 else {
2654 trace.max_entries = FTRACE_STACK_ENTRIES;
2655 trace.entries = entry->caller;
2656 if (regs)
2657 save_stack_trace_regs(regs, &trace);
2658 else
2659 save_stack_trace(&trace);
2660 }
2661
2662 entry->size = trace.nr_entries;
2663
2664 if (!call_filter_check_discard(call, entry, buffer, event))
2665 __buffer_unlock_commit(buffer, event);
2666
2667 out:
2668 /* Again, don't let gcc optimize things here */
2669 barrier();
2670 __this_cpu_dec(ftrace_stack_reserve);
2671 preempt_enable_notrace();
2672
2673 }
2674
2675 static inline void ftrace_trace_stack(struct trace_array *tr,
2676 struct ring_buffer *buffer,
2677 unsigned long flags,
2678 int skip, int pc, struct pt_regs *regs)
2679 {
2680 if (!(tr->trace_flags & TRACE_ITER_STACKTRACE))
2681 return;
2682
2683 __ftrace_trace_stack(buffer, flags, skip, pc, regs);
2684 }
2685
2686 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
2687 int pc)
2688 {
2689 struct ring_buffer *buffer = tr->trace_buffer.buffer;
2690
2691 if (rcu_is_watching()) {
2692 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2693 return;
2694 }
2695
2696 /*
2697 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
2698 * but if the above rcu_is_watching() failed, then the NMI
2699 * triggered someplace critical, and rcu_irq_enter() should
2700 * not be called from NMI.
2701 */
2702 if (unlikely(in_nmi()))
2703 return;
2704
2705 rcu_irq_enter_irqson();
2706 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
2707 rcu_irq_exit_irqson();
2708 }
2709
2710 /**
2711 * trace_dump_stack - record a stack back trace in the trace buffer
2712 * @skip: Number of functions to skip (helper handlers)
2713 */
2714 void trace_dump_stack(int skip)
2715 {
2716 unsigned long flags;
2717
2718 if (tracing_disabled || tracing_selftest_running)
2719 return;
2720
2721 local_save_flags(flags);
2722
2723 #ifndef CONFIG_UNWINDER_ORC
2724 /* Skip 1 to skip this function. */
2725 skip++;
2726 #endif
2727 __ftrace_trace_stack(global_trace.trace_buffer.buffer,
2728 flags, skip, preempt_count(), NULL);
2729 }
2730 EXPORT_SYMBOL_GPL(trace_dump_stack);
2731
2732 static DEFINE_PER_CPU(int, user_stack_count);
2733
2734 void
2735 ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
2736 {
2737 struct trace_event_call *call = &event_user_stack;
2738 struct ring_buffer_event *event;
2739 struct userstack_entry *entry;
2740 struct stack_trace trace;
2741
2742 if (!(global_trace.trace_flags & TRACE_ITER_USERSTACKTRACE))
2743 return;
2744
2745 /*
2746 * NMIs can not handle page faults, even with fix ups.
2747 * The save user stack can (and often does) fault.
2748 */
2749 if (unlikely(in_nmi()))
2750 return;
2751
2752 /*
2753 * prevent recursion, since the user stack tracing may
2754 * trigger other kernel events.
2755 */
2756 preempt_disable();
2757 if (__this_cpu_read(user_stack_count))
2758 goto out;
2759
2760 __this_cpu_inc(user_stack_count);
2761
2762 event = __trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
2763 sizeof(*entry), flags, pc);
2764 if (!event)
2765 goto out_drop_count;
2766 entry = ring_buffer_event_data(event);
2767
2768 entry->tgid = current->tgid;
2769 memset(&entry->caller, 0, sizeof(entry->caller));
2770
2771 trace.nr_entries = 0;
2772 trace.max_entries = FTRACE_STACK_ENTRIES;
2773 trace.skip = 0;
2774 trace.entries = entry->caller;
2775
2776 save_stack_trace_user(&trace);
2777 if (!call_filter_check_discard(call, entry, buffer, event))
2778 __buffer_unlock_commit(buffer, event);
2779
2780 out_drop_count:
2781 __this_cpu_dec(user_stack_count);
2782 out:
2783 preempt_enable();
2784 }
2785
2786 #ifdef UNUSED
2787 static void __trace_userstack(struct trace_array *tr, unsigned long flags)
2788 {
2789 ftrace_trace_userstack(tr, flags, preempt_count());
2790 }
2791 #endif /* UNUSED */
2792
2793 #endif /* CONFIG_STACKTRACE */
2794
2795 /* created for use with alloc_percpu */
2796 struct trace_buffer_struct {
2797 int nesting;
2798 char buffer[4][TRACE_BUF_SIZE];
2799 };
2800
2801 static struct trace_buffer_struct *trace_percpu_buffer;
2802
2803 /*
2804 * Thise allows for lockless recording. If we're nested too deeply, then
2805 * this returns NULL.
2806 */
2807 static char *get_trace_buf(void)
2808 {
2809 struct trace_buffer_struct *buffer = this_cpu_ptr(trace_percpu_buffer);
2810
2811 if (!buffer || buffer->nesting >= 4)
2812 return NULL;
2813
2814 buffer->nesting++;
2815
2816 /* Interrupts must see nesting incremented before we use the buffer */
2817 barrier();
2818 return &buffer->buffer[buffer->nesting][0];
2819 }
2820
2821 static void put_trace_buf(void)
2822 {
2823 /* Don't let the decrement of nesting leak before this */
2824 barrier();
2825 this_cpu_dec(trace_percpu_buffer->nesting);
2826 }
2827
2828 static int alloc_percpu_trace_buffer(void)
2829 {
2830 struct trace_buffer_struct *buffers;
2831
2832 buffers = alloc_percpu(struct trace_buffer_struct);
2833 if (WARN(!buffers, "Could not allocate percpu trace_printk buffer"))
2834 return -ENOMEM;
2835
2836 trace_percpu_buffer = buffers;
2837 return 0;
2838 }
2839
2840 static int buffers_allocated;
2841
2842 void trace_printk_init_buffers(void)
2843 {
2844 if (buffers_allocated)
2845 return;
2846
2847 if (alloc_percpu_trace_buffer())
2848 return;
2849
2850 /* trace_printk() is for debug use only. Don't use it in production. */
2851
2852 pr_warn("\n");
2853 pr_warn("**********************************************************\n");
2854 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2855 pr_warn("** **\n");
2856 pr_warn("** trace_printk() being used. Allocating extra memory. **\n");
2857 pr_warn("** **\n");
2858 pr_warn("** This means that this is a DEBUG kernel and it is **\n");
2859 pr_warn("** unsafe for production use. **\n");
2860 pr_warn("** **\n");
2861 pr_warn("** If you see this message and you are not debugging **\n");
2862 pr_warn("** the kernel, report this immediately to your vendor! **\n");
2863 pr_warn("** **\n");
2864 pr_warn("** NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE **\n");
2865 pr_warn("**********************************************************\n");
2866
2867 /* Expand the buffers to set size */
2868 tracing_update_buffers();
2869
2870 buffers_allocated = 1;
2871
2872 /*
2873 * trace_printk_init_buffers() can be called by modules.
2874 * If that happens, then we need to start cmdline recording
2875 * directly here. If the global_trace.buffer is already
2876 * allocated here, then this was called by module code.
2877 */
2878 if (global_trace.trace_buffer.buffer)
2879 tracing_start_cmdline_record();
2880 }
2881
2882 void trace_printk_start_comm(void)
2883 {
2884 /* Start tracing comms if trace printk is set */
2885 if (!buffers_allocated)
2886 return;
2887 tracing_start_cmdline_record();
2888 }
2889
2890 static void trace_printk_start_stop_comm(int enabled)
2891 {
2892 if (!buffers_allocated)
2893 return;
2894
2895 if (enabled)
2896 tracing_start_cmdline_record();
2897 else
2898 tracing_stop_cmdline_record();
2899 }
2900
2901 /**
2902 * trace_vbprintk - write binary msg to tracing buffer
2903 *
2904 */
2905 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
2906 {
2907 struct trace_event_call *call = &event_bprint;
2908 struct ring_buffer_event *event;
2909 struct ring_buffer *buffer;
2910 struct trace_array *tr = &global_trace;
2911 struct bprint_entry *entry;
2912 unsigned long flags;
2913 char *tbuffer;
2914 int len = 0, size, pc;
2915
2916 if (unlikely(tracing_selftest_running || tracing_disabled))
2917 return 0;
2918
2919 /* Don't pollute graph traces with trace_vprintk internals */
2920 pause_graph_tracing();
2921
2922 pc = preempt_count();
2923 preempt_disable_notrace();
2924
2925 tbuffer = get_trace_buf();
2926 if (!tbuffer) {
2927 len = 0;
2928 goto out_nobuffer;
2929 }
2930
2931 len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
2932
2933 if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
2934 goto out;
2935
2936 local_save_flags(flags);
2937 size = sizeof(*entry) + sizeof(u32) * len;
2938 buffer = tr->trace_buffer.buffer;
2939 event = __trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
2940 flags, pc);
2941 if (!event)
2942 goto out;
2943 entry = ring_buffer_event_data(event);
2944 entry->ip = ip;
2945 entry->fmt = fmt;
2946
2947 memcpy(entry->buf, tbuffer, sizeof(u32) * len);
2948 if (!call_filter_check_discard(call, entry, buffer, event)) {
2949 __buffer_unlock_commit(buffer, event);
2950 ftrace_trace_stack(tr, buffer, flags, 6, pc, NULL);
2951 }
2952
2953 out:
2954 put_trace_buf();
2955
2956 out_nobuffer:
2957 preempt_enable_notrace();
2958 unpause_graph_tracing();
2959
2960 return len;
2961 }
2962 EXPORT_SYMBOL_GPL(trace_vbprintk);
2963
2964 __printf(3, 0)
2965 static int
2966 __trace_array_vprintk(struct ring_buffer *buffer,
2967 unsigned long ip, const char *fmt, va_list args)
2968 {
2969 struct trace_event_call *call = &event_print;
2970 struct ring_buffer_event *event;
2971 int len = 0, size, pc;
2972 struct print_entry *entry;
2973 unsigned long flags;
2974 char *tbuffer;
2975
2976 if (tracing_disabled || tracing_selftest_running)
2977 return 0;
2978
2979 /* Don't pollute graph traces with trace_vprintk internals */
2980 pause_graph_tracing();
2981
2982 pc = preempt_count();
2983 preempt_disable_notrace();
2984
2985
2986 tbuffer = get_trace_buf();
2987 if (!tbuffer) {
2988 len = 0;
2989 goto out_nobuffer;
2990 }
2991
2992 len = vscnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
2993
2994 local_save_flags(flags);
2995 size = sizeof(*entry) + len + 1;
2996 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
2997 flags, pc);
2998 if (!event)
2999 goto out;
3000 entry = ring_buffer_event_data(event);
3001 entry->ip = ip;
3002
3003 memcpy(&entry->buf, tbuffer, len + 1);
3004 if (!call_filter_check_discard(call, entry, buffer, event)) {
3005 __buffer_unlock_commit(buffer, event);
3006 ftrace_trace_stack(&global_trace, buffer, flags, 6, pc, NULL);
3007 }
3008
3009 out:
3010 put_trace_buf();
3011
3012 out_nobuffer:
3013 preempt_enable_notrace();
3014 unpause_graph_tracing();
3015
3016 return len;
3017 }
3018
3019 __printf(3, 0)
3020 int trace_array_vprintk(struct trace_array *tr,
3021 unsigned long ip, const char *fmt, va_list args)
3022 {
3023 return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
3024 }
3025
3026 __printf(3, 0)
3027 int trace_array_printk(struct trace_array *tr,
3028 unsigned long ip, const char *fmt, ...)
3029 {
3030 int ret;
3031 va_list ap;
3032
3033 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3034 return 0;
3035
3036 va_start(ap, fmt);
3037 ret = trace_array_vprintk(tr, ip, fmt, ap);
3038 va_end(ap);
3039 return ret;
3040 }
3041
3042 __printf(3, 4)
3043 int trace_array_printk_buf(struct ring_buffer *buffer,
3044 unsigned long ip, const char *fmt, ...)
3045 {
3046 int ret;
3047 va_list ap;
3048
3049 if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
3050 return 0;
3051
3052 va_start(ap, fmt);
3053 ret = __trace_array_vprintk(buffer, ip, fmt, ap);
3054 va_end(ap);
3055 return ret;
3056 }
3057
3058 __printf(2, 0)
3059 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
3060 {
3061 return trace_array_vprintk(&global_trace, ip, fmt, args);
3062 }
3063 EXPORT_SYMBOL_GPL(trace_vprintk);
3064
3065 static void trace_iterator_increment(struct trace_iterator *iter)
3066 {
3067 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
3068
3069 iter->idx++;
3070 if (buf_iter)
3071 ring_buffer_read(buf_iter, NULL);
3072 }
3073
3074 static struct trace_entry *
3075 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
3076 unsigned long *lost_events)
3077 {
3078 struct ring_buffer_event *event;
3079 struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
3080
3081 if (buf_iter)
3082 event = ring_buffer_iter_peek(buf_iter, ts);
3083 else
3084 event = ring_buffer_peek(iter->trace_buffer->buffer, cpu, ts,
3085 lost_events);
3086
3087 if (event) {
3088 iter->ent_size = ring_buffer_event_length(event);
3089 return ring_buffer_event_data(event);
3090 }
3091 iter->ent_size = 0;
3092 return NULL;
3093 }
3094
3095 static struct trace_entry *
3096 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
3097 unsigned long *missing_events, u64 *ent_ts)
3098 {
3099 struct ring_buffer *buffer = iter->trace_buffer->buffer;
3100 struct trace_entry *ent, *next = NULL;
3101 unsigned long lost_events = 0, next_lost = 0;
3102 int cpu_file = iter->cpu_file;
3103 u64 next_ts = 0, ts;
3104 int next_cpu = -1;
3105 int next_size = 0;
3106 int cpu;
3107
3108 /*
3109 * If we are in a per_cpu trace file, don't bother by iterating over
3110 * all cpu and peek directly.
3111 */
3112 if (cpu_file > RING_BUFFER_ALL_CPUS) {
3113 if (ring_buffer_empty_cpu(buffer, cpu_file))
3114 return NULL;
3115 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
3116 if (ent_cpu)
3117 *ent_cpu = cpu_file;
3118
3119 return ent;
3120 }
3121
3122 for_each_tracing_cpu(cpu) {
3123
3124 if (ring_buffer_empty_cpu(buffer, cpu))
3125 continue;
3126
3127 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
3128
3129 /*
3130 * Pick the entry with the smallest timestamp:
3131 */
3132 if (ent && (!next || ts < next_ts)) {
3133 next = ent;
3134 next_cpu = cpu;
3135 next_ts = ts;
3136 next_lost = lost_events;
3137 next_size = iter->ent_size;
3138 }
3139 }
3140
3141 iter->ent_size = next_size;
3142
3143 if (ent_cpu)
3144 *ent_cpu = next_cpu;
3145
3146 if (ent_ts)
3147 *ent_ts = next_ts;
3148
3149 if (missing_events)
3150 *missing_events = next_lost;
3151
3152 return next;
3153 }
3154
3155 /* Find the next real entry, without updating the iterator itself */
3156 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
3157 int *ent_cpu, u64 *ent_ts)
3158 {
3159 return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
3160 }
3161
3162 /* Find the next real entry, and increment the iterator to the next entry */
3163 void *trace_find_next_entry_inc(struct trace_iterator *iter)
3164 {
3165 iter->ent = __find_next_entry(iter, &iter->cpu,
3166 &iter->lost_events, &iter->ts);
3167
3168 if (iter->ent)
3169 trace_iterator_increment(iter);
3170
3171 return iter->ent ? iter : NULL;
3172 }
3173
3174 static void trace_consume(struct trace_iterator *iter)
3175 {
3176 ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu, &iter->ts,
3177 &iter->lost_events);
3178 }
3179
3180 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
3181 {
3182 struct trace_iterator *iter = m->private;
3183 int i = (int)*pos;
3184 void *ent;
3185
3186 WARN_ON_ONCE(iter->leftover);
3187
3188 (*pos)++;
3189
3190 /* can't go backwards */
3191 if (iter->idx > i)
3192 return NULL;
3193
3194 if (iter->idx < 0)
3195 ent = trace_find_next_entry_inc(iter);
3196 else
3197 ent = iter;
3198
3199 while (ent && iter->idx < i)
3200 ent = trace_find_next_entry_inc(iter);
3201
3202 iter->pos = *pos;
3203
3204 return ent;
3205 }
3206
3207 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
3208 {
3209 struct ring_buffer_event *event;
3210 struct ring_buffer_iter *buf_iter;
3211 unsigned long entries = 0;
3212 u64 ts;
3213
3214 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = 0;
3215
3216 buf_iter = trace_buffer_iter(iter, cpu);
3217 if (!buf_iter)
3218 return;
3219
3220 ring_buffer_iter_reset(buf_iter);
3221
3222 /*
3223 * We could have the case with the max latency tracers
3224 * that a reset never took place on a cpu. This is evident
3225 * by the timestamp being before the start of the buffer.
3226 */
3227 while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
3228 if (ts >= iter->trace_buffer->time_start)
3229 break;
3230 entries++;
3231 ring_buffer_read(buf_iter, NULL);
3232 }
3233
3234 per_cpu_ptr(iter->trace_buffer->data, cpu)->skipped_entries = entries;
3235 }
3236
3237 /*
3238 * The current tracer is copied to avoid a global locking
3239 * all around.
3240 */
3241 static void *s_start(struct seq_file *m, loff_t *pos)
3242 {
3243 struct trace_iterator *iter = m->private;
3244 struct trace_array *tr = iter->tr;
3245 int cpu_file = iter->cpu_file;
3246 void *p = NULL;
3247 loff_t l = 0;
3248 int cpu;
3249
3250 /*
3251 * copy the tracer to avoid using a global lock all around.
3252 * iter->trace is a copy of current_trace, the pointer to the
3253 * name may be used instead of a strcmp(), as iter->trace->name
3254 * will point to the same string as current_trace->name.
3255 */
3256 mutex_lock(&trace_types_lock);
3257 if (unlikely(tr->current_trace && iter->trace->name != tr->current_trace->name))
3258 *iter->trace = *tr->current_trace;
3259 mutex_unlock(&trace_types_lock);
3260
3261 #ifdef CONFIG_TRACER_MAX_TRACE
3262 if (iter->snapshot && iter->trace->use_max_tr)
3263 return ERR_PTR(-EBUSY);
3264 #endif
3265
3266 if (!iter->snapshot)
3267 atomic_inc(&trace_record_taskinfo_disabled);
3268
3269 if (*pos != iter->pos) {
3270 iter->ent = NULL;
3271 iter->cpu = 0;
3272 iter->idx = -1;
3273
3274 if (cpu_file == RING_BUFFER_ALL_CPUS) {
3275 for_each_tracing_cpu(cpu)
3276 tracing_iter_reset(iter, cpu);
3277 } else
3278 tracing_iter_reset(iter, cpu_file);
3279
3280 iter->leftover = 0;
3281 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
3282 ;
3283
3284 } else {
3285 /*
3286 * If we overflowed the seq_file before, then we want
3287 * to just reuse the trace_seq buffer again.
3288 */
3289 if (iter->leftover)
3290 p = iter;
3291 else {
3292 l = *pos - 1;
3293 p = s_next(m, p, &l);
3294 }
3295 }
3296
3297 trace_event_read_lock();
3298 trace_access_lock(cpu_file);
3299 return p;
3300 }
3301
3302 static void s_stop(struct seq_file *m, void *p)
3303 {
3304 struct trace_iterator *iter = m->private;
3305
3306 #ifdef CONFIG_TRACER_MAX_TRACE
3307 if (iter->snapshot && iter->trace->use_max_tr)
3308 return;
3309 #endif
3310
3311 if (!iter->snapshot)
3312 atomic_dec(&trace_record_taskinfo_disabled);
3313
3314 trace_access_unlock(iter->cpu_file);
3315 trace_event_read_unlock();
3316 }
3317
3318 static void
3319 get_total_entries(struct trace_buffer *buf,
3320 unsigned long *total, unsigned long *entries)
3321 {
3322 unsigned long count;
3323 int cpu;
3324
3325 *total = 0;
3326 *entries = 0;
3327
3328 for_each_tracing_cpu(cpu) {
3329 count = ring_buffer_entries_cpu(buf->buffer, cpu);
3330 /*
3331 * If this buffer has skipped entries, then we hold all
3332 * entries for the trace and we need to ignore the
3333 * ones before the time stamp.
3334 */
3335 if (per_cpu_ptr(buf->data, cpu)->skipped_entries) {
3336 count -= per_cpu_ptr(buf->data, cpu)->skipped_entries;
3337 /* total is the same as the entries */
3338 *total += count;
3339 } else
3340 *total += count +
3341 ring_buffer_overrun_cpu(buf->buffer, cpu);
3342 *entries += count;
3343 }
3344 }
3345
3346 static void print_lat_help_header(struct seq_file *m)
3347 {
3348 seq_puts(m, "# _------=> CPU# \n"
3349 "# / _-----=> irqs-off \n"
3350 "# | / _----=> need-resched \n"
3351 "# || / _---=> hardirq/softirq \n"
3352 "# ||| / _--=> preempt-depth \n"
3353 "# |||| / delay \n"
3354 "# cmd pid ||||| time | caller \n"
3355 "# \\ / ||||| \\ | / \n");
3356 }
3357
3358 static void print_event_info(struct trace_buffer *buf, struct seq_file *m)
3359 {
3360 unsigned long total;
3361 unsigned long entries;
3362
3363 get_total_entries(buf, &total, &entries);
3364 seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu #P:%d\n",
3365 entries, total, num_online_cpus());
3366 seq_puts(m, "#\n");
3367 }
3368
3369 static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
3370 unsigned int flags)
3371 {
3372 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3373
3374 print_event_info(buf, m);
3375
3376 seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
3377 seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
3378 }
3379
3380 static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
3381 unsigned int flags)
3382 {
3383 bool tgid = flags & TRACE_ITER_RECORD_TGID;
3384 const char tgid_space[] = " ";
3385 const char space[] = " ";
3386
3387 seq_printf(m, "# %s _-----=> irqs-off\n",
3388 tgid ? tgid_space : space);
3389 seq_printf(m, "# %s / _----=> need-resched\n",
3390 tgid ? tgid_space : space);
3391 seq_printf(m, "# %s| / _---=> hardirq/softirq\n",
3392 tgid ? tgid_space : space);
3393 seq_printf(m, "# %s|| / _--=> preempt-depth\n",
3394 tgid ? tgid_space : space);
3395 seq_printf(m, "# %s||| / delay\n",
3396 tgid ? tgid_space : space);
3397 seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
3398 tgid ? " TGID " : space);
3399 seq_printf(m, "# | | %s | |||| | |\n",
3400 tgid ? " | " : space);
3401 }
3402
3403 void
3404 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
3405 {
3406 unsigned long sym_flags = (global_trace.trace_flags & TRACE_ITER_SYM_MASK);
3407 struct trace_buffer *buf = iter->trace_buffer;
3408 struct trace_array_cpu *data = per_cpu_ptr(buf->data, buf->cpu);
3409 struct tracer *type = iter->trace;
3410 unsigned long entries;
3411 unsigned long total;
3412 const char *name = "preemption";
3413
3414 name = type->name;
3415
3416 get_total_entries(buf, &total, &entries);
3417
3418 seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
3419 name, UTS_RELEASE);
3420 seq_puts(m, "# -----------------------------------"
3421 "---------------------------------\n");
3422 seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
3423 " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
3424 nsecs_to_usecs(data->saved_latency),
3425 entries,
3426 total,
3427 buf->cpu,
3428 #if defined(CONFIG_PREEMPT_NONE)
3429 "server",
3430 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
3431 "desktop",
3432 #elif defined(CONFIG_PREEMPT)
3433 "preempt",
3434 #else
3435 "unknown",
3436 #endif
3437 /* These are reserved for later use */
3438 0, 0, 0, 0);
3439 #ifdef CONFIG_SMP
3440 seq_printf(m, " #P:%d)\n", num_online_cpus());
3441 #else
3442 seq_puts(m, ")\n");
3443 #endif
3444 seq_puts(m, "# -----------------\n");
3445 seq_printf(m, "# | task: %.16s-%d "
3446 "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
3447 data->comm, data->pid,
3448 from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
3449 data->policy, data->rt_priority);
3450 seq_puts(m, "# -----------------\n");
3451
3452 if (data->critical_start) {
3453 seq_puts(m, "# => started at: ");
3454 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
3455 trace_print_seq(m, &iter->seq);
3456 seq_puts(m, "\n# => ended at: ");
3457 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
3458 trace_print_seq(m, &iter->seq);
3459 seq_puts(m, "\n#\n");
3460 }
3461
3462 seq_puts(m, "#\n");
3463 }
3464
3465 static void test_cpu_buff_start(struct trace_iterator *iter)
3466 {
3467 struct trace_seq *s = &iter->seq;
3468 struct trace_array *tr = iter->tr;
3469
3470 if (!(tr->trace_flags & TRACE_ITER_ANNOTATE))
3471 return;
3472
3473 if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
3474 return;
3475
3476 if (cpumask_available(iter->started) &&
3477 cpumask_test_cpu(iter->cpu, iter->started))
3478 return;
3479
3480 if (per_cpu_ptr(iter->trace_buffer->data, iter->cpu)->skipped_entries)
3481 return;
3482
3483 if (cpumask_available(iter->started))
3484 cpumask_set_cpu(iter->cpu, iter->started);
3485
3486 /* Don't print started cpu buffer for the first entry of the trace */
3487 if (iter->idx > 1)
3488 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
3489 iter->cpu);
3490 }
3491
3492 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
3493 {
3494 struct trace_array *tr = iter->tr;
3495 struct trace_seq *s = &iter->seq;
3496 unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
3497 struct trace_entry *entry;
3498 struct trace_event *event;
3499
3500 entry = iter->ent;
3501
3502 test_cpu_buff_start(iter);
3503
3504 event = ftrace_find_event(entry->type);
3505
3506 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3507 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3508 trace_print_lat_context(iter);
3509 else
3510 trace_print_context(iter);
3511 }
3512
3513 if (trace_seq_has_overflowed(s))
3514 return TRACE_TYPE_PARTIAL_LINE;
3515
3516 if (event)
3517 return event->funcs->trace(iter, sym_flags, event);
3518
3519 trace_seq_printf(s, "Unknown type %d\n", entry->type);
3520
3521 return trace_handle_return(s);
3522 }
3523
3524 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
3525 {
3526 struct trace_array *tr = iter->tr;
3527 struct trace_seq *s = &iter->seq;
3528 struct trace_entry *entry;
3529 struct trace_event *event;
3530
3531 entry = iter->ent;
3532
3533 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO)
3534 trace_seq_printf(s, "%d %d %llu ",
3535 entry->pid, iter->cpu, iter->ts);
3536
3537 if (trace_seq_has_overflowed(s))
3538 return TRACE_TYPE_PARTIAL_LINE;
3539
3540 event = ftrace_find_event(entry->type);
3541 if (event)
3542 return event->funcs->raw(iter, 0, event);
3543
3544 trace_seq_printf(s, "%d ?\n", entry->type);
3545
3546 return trace_handle_return(s);
3547 }
3548
3549 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
3550 {
3551 struct trace_array *tr = iter->tr;
3552 struct trace_seq *s = &iter->seq;
3553 unsigned char newline = '\n';
3554 struct trace_entry *entry;
3555 struct trace_event *event;
3556
3557 entry = iter->ent;
3558
3559 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3560 SEQ_PUT_HEX_FIELD(s, entry->pid);
3561 SEQ_PUT_HEX_FIELD(s, iter->cpu);
3562 SEQ_PUT_HEX_FIELD(s, iter->ts);
3563 if (trace_seq_has_overflowed(s))
3564 return TRACE_TYPE_PARTIAL_LINE;
3565 }
3566
3567 event = ftrace_find_event(entry->type);
3568 if (event) {
3569 enum print_line_t ret = event->funcs->hex(iter, 0, event);
3570 if (ret != TRACE_TYPE_HANDLED)
3571 return ret;
3572 }
3573
3574 SEQ_PUT_FIELD(s, newline);
3575
3576 return trace_handle_return(s);
3577 }
3578
3579 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
3580 {
3581 struct trace_array *tr = iter->tr;
3582 struct trace_seq *s = &iter->seq;
3583 struct trace_entry *entry;
3584 struct trace_event *event;
3585
3586 entry = iter->ent;
3587
3588 if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
3589 SEQ_PUT_FIELD(s, entry->pid);
3590 SEQ_PUT_FIELD(s, iter->cpu);
3591 SEQ_PUT_FIELD(s, iter->ts);
3592 if (trace_seq_has_overflowed(s))
3593 return TRACE_TYPE_PARTIAL_LINE;
3594 }
3595
3596 event = ftrace_find_event(entry->type);
3597 return event ? event->funcs->binary(iter, 0, event) :
3598 TRACE_TYPE_HANDLED;
3599 }
3600
3601 int trace_empty(struct trace_iterator *iter)
3602 {
3603 struct ring_buffer_iter *buf_iter;
3604 int cpu;
3605
3606 /* If we are looking at one CPU buffer, only check that one */
3607 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
3608 cpu = iter->cpu_file;
3609 buf_iter = trace_buffer_iter(iter, cpu);
3610 if (buf_iter) {
3611 if (!ring_buffer_iter_empty(buf_iter))
3612 return 0;
3613 } else {
3614 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3615 return 0;
3616 }
3617 return 1;
3618 }
3619
3620 for_each_tracing_cpu(cpu) {
3621 buf_iter = trace_buffer_iter(iter, cpu);
3622 if (buf_iter) {
3623 if (!ring_buffer_iter_empty(buf_iter))
3624 return 0;
3625 } else {
3626 if (!ring_buffer_empty_cpu(iter->trace_buffer->buffer, cpu))
3627 return 0;
3628 }
3629 }
3630
3631 return 1;
3632 }
3633
3634 /* Called with trace_event_read_lock() held. */
3635 enum print_line_t print_trace_line(struct trace_iterator *iter)
3636 {
3637 struct trace_array *tr = iter->tr;
3638 unsigned long trace_flags = tr->trace_flags;
3639 enum print_line_t ret;
3640
3641 if (iter->lost_events) {
3642 trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
3643 iter->cpu, iter->lost_events);
3644 if (trace_seq_has_overflowed(&iter->seq))
3645 return TRACE_TYPE_PARTIAL_LINE;
3646 }
3647
3648 if (iter->trace && iter->trace->print_line) {
3649 ret = iter->trace->print_line(iter);
3650 if (ret != TRACE_TYPE_UNHANDLED)
3651 return ret;
3652 }
3653
3654 if (iter->ent->type == TRACE_BPUTS &&
3655 trace_flags & TRACE_ITER_PRINTK &&
3656 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3657 return trace_print_bputs_msg_only(iter);
3658
3659 if (iter->ent->type == TRACE_BPRINT &&
3660 trace_flags & TRACE_ITER_PRINTK &&
3661 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3662 return trace_print_bprintk_msg_only(iter);
3663
3664 if (iter->ent->type == TRACE_PRINT &&
3665 trace_flags & TRACE_ITER_PRINTK &&
3666 trace_flags & TRACE_ITER_PRINTK_MSGONLY)
3667 return trace_print_printk_msg_only(iter);
3668
3669 if (trace_flags & TRACE_ITER_BIN)
3670 return print_bin_fmt(iter);
3671
3672 if (trace_flags & TRACE_ITER_HEX)
3673 return print_hex_fmt(iter);
3674
3675 if (trace_flags & TRACE_ITER_RAW)
3676 return print_raw_fmt(iter);
3677
3678 return print_trace_fmt(iter);
3679 }
3680
3681 void trace_latency_header(struct seq_file *m)
3682 {
3683 struct trace_iterator *iter = m->private;
3684 struct trace_array *tr = iter->tr;
3685
3686 /* print nothing if the buffers are empty */
3687 if (trace_empty(iter))
3688 return;
3689
3690 if (iter->iter_flags & TRACE_FILE_LAT_FMT)
3691 print_trace_header(m, iter);
3692
3693 if (!(tr->trace_flags & TRACE_ITER_VERBOSE))
3694 print_lat_help_header(m);
3695 }
3696
3697 void trace_default_header(struct seq_file *m)
3698 {
3699 struct trace_iterator *iter = m->private;
3700 struct trace_array *tr = iter->tr;
3701 unsigned long trace_flags = tr->trace_flags;
3702
3703 if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
3704 return;
3705
3706 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
3707 /* print nothing if the buffers are empty */
3708 if (trace_empty(iter))
3709 return;
3710 print_trace_header(m, iter);
3711 if (!(trace_flags & TRACE_ITER_VERBOSE))
3712 print_lat_help_header(m);
3713 } else {
3714 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
3715 if (trace_flags & TRACE_ITER_IRQ_INFO)
3716 print_func_help_header_irq(iter->trace_buffer,
3717 m, trace_flags);
3718 else
3719 print_func_help_header(iter->trace_buffer, m,
3720 trace_flags);
3721 }
3722 }
3723 }
3724
3725 static void test_ftrace_alive(struct seq_file *m)
3726 {
3727 if (!ftrace_is_dead())
3728 return;
3729 seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n"
3730 "# MAY BE MISSING FUNCTION EVENTS\n");
3731 }
3732
3733 #ifdef CONFIG_TRACER_MAX_TRACE
3734 static void show_snapshot_main_help(struct seq_file *m)
3735 {
3736 seq_puts(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"
3737 "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3738 "# Takes a snapshot of the main buffer.\n"
3739 "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate or free)\n"
3740 "# (Doesn't have to be '2' works with any number that\n"
3741 "# is not a '0' or '1')\n");
3742 }
3743
3744 static void show_snapshot_percpu_help(struct seq_file *m)
3745 {
3746 seq_puts(m, "# echo 0 > snapshot : Invalid for per_cpu snapshot file.\n");
3747 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
3748 seq_puts(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"
3749 "# Takes a snapshot of the main buffer for this cpu.\n");
3750 #else
3751 seq_puts(m, "# echo 1 > snapshot : Not supported with this kernel.\n"
3752 "# Must use main snapshot file to allocate.\n");
3753 #endif
3754 seq_puts(m, "# echo 2 > snapshot : Clears this cpu's snapshot buffer (but does not allocate)\n"
3755 "# (Doesn't have to be '2' works with any number that\n"
3756 "# is not a '0' or '1')\n");
3757 }
3758
3759 static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
3760 {
3761 if (iter->tr->allocated_snapshot)
3762 seq_puts(m, "#\n# * Snapshot is allocated *\n#\n");
3763 else
3764 seq_puts(m, "#\n# * Snapshot is freed *\n#\n");
3765
3766 seq_puts(m, "# Snapshot commands:\n");
3767 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
3768 show_snapshot_main_help(m);
3769 else
3770 show_snapshot_percpu_help(m);
3771 }
3772 #else
3773 /* Should never be called */
3774 static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { }
3775 #endif
3776
3777 static int s_show(struct seq_file *m, void *v)
3778 {
3779 struct trace_iterator *iter = v;
3780 int ret;
3781
3782 if (iter->ent == NULL) {
3783 if (iter->tr) {
3784 seq_printf(m, "# tracer: %s\n", iter->trace->name);
3785 seq_puts(m, "#\n");
3786 test_ftrace_alive(m);
3787 }
3788 if (iter->snapshot && trace_empty(iter))
3789 print_snapshot_help(m, iter);
3790 else if (iter->trace && iter->trace->print_header)
3791 iter->trace->print_header(m);
3792 else
3793 trace_default_header(m);
3794
3795 } else if (iter->leftover) {
3796 /*
3797 * If we filled the seq_file buffer earlier, we
3798 * want to just show it now.
3799 */
3800 ret = trace_print_seq(m, &iter->seq);
3801
3802 /* ret should this time be zero, but you never know */
3803 iter->leftover = ret;
3804
3805 } else {
3806 print_trace_line(iter);
3807 ret = trace_print_seq(m, &iter->seq);
3808 /*
3809 * If we overflow the seq_file buffer, then it will
3810 * ask us for this data again at start up.
3811 * Use that instead.
3812 * ret is 0 if seq_file write succeeded.
3813 * -1 otherwise.
3814 */
3815 iter->leftover = ret;
3816 }
3817
3818 return 0;
3819 }
3820
3821 /*
3822 * Should be used after trace_array_get(), trace_types_lock
3823 * ensures that i_cdev was already initialized.
3824 */
3825 static inline int tracing_get_cpu(struct inode *inode)
3826 {
3827 if (inode->i_cdev) /* See trace_create_cpu_file() */
3828 return (long)inode->i_cdev - 1;
3829 return RING_BUFFER_ALL_CPUS;
3830 }
3831
3832 static const struct seq_operations tracer_seq_ops = {
3833 .start = s_start,
3834 .next = s_next,
3835 .stop = s_stop,
3836 .show = s_show,
3837 };
3838
3839 static struct trace_iterator *
3840 __tracing_open(struct inode *inode, struct file *file, bool snapshot)
3841 {
3842 struct trace_array *tr = inode->i_private;
3843 struct trace_iterator *iter;
3844 int cpu;
3845
3846 if (tracing_disabled)
3847 return ERR_PTR(-ENODEV);
3848
3849 iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
3850 if (!iter)
3851 return ERR_PTR(-ENOMEM);
3852
3853 iter->buffer_iter = kcalloc(nr_cpu_ids, sizeof(*iter->buffer_iter),
3854 GFP_KERNEL);
3855 if (!iter->buffer_iter)
3856 goto release;
3857
3858 /*
3859 * We make a copy of the current tracer to avoid concurrent
3860 * changes on it while we are reading.
3861 */
3862 mutex_lock(&trace_types_lock);
3863 iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
3864 if (!iter->trace)
3865 goto fail;
3866
3867 *iter->trace = *tr->current_trace;
3868
3869 if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
3870 goto fail;
3871
3872 iter->tr = tr;
3873
3874 #ifdef CONFIG_TRACER_MAX_TRACE
3875 /* Currently only the top directory has a snapshot */
3876 if (tr->current_trace->print_max || snapshot)
3877 iter->trace_buffer = &tr->max_buffer;
3878 else
3879 #endif
3880 iter->trace_buffer = &tr->trace_buffer;
3881 iter->snapshot = snapshot;
3882 iter->pos = -1;
3883 iter->cpu_file = tracing_get_cpu(inode);
3884 mutex_init(&iter->mutex);
3885
3886 /* Notify the tracer early; before we stop tracing. */
3887 if (iter->trace && iter->trace->open)
3888 iter->trace->open(iter);
3889
3890 /* Annotate start of buffers if we had overruns */
3891 if (ring_buffer_overruns(iter->trace_buffer->buffer))
3892 iter->iter_flags |= TRACE_FILE_ANNOTATE;
3893
3894 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
3895 if (trace_clocks[tr->clock_id].in_ns)
3896 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
3897
3898 /* stop the trace while dumping if we are not opening "snapshot" */
3899 if (!iter->snapshot)
3900 tracing_stop_tr(tr);
3901
3902 if (iter->cpu_file == RING_BUFFER_ALL_CPUS) {
3903 for_each_tracing_cpu(cpu) {
3904 iter->buffer_iter[cpu] =
3905 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3906 }
3907 ring_buffer_read_prepare_sync();
3908 for_each_tracing_cpu(cpu) {
3909 ring_buffer_read_start(iter->buffer_iter[cpu]);
3910 tracing_iter_reset(iter, cpu);
3911 }
3912 } else {
3913 cpu = iter->cpu_file;
3914 iter->buffer_iter[cpu] =
3915 ring_buffer_read_prepare(iter->trace_buffer->buffer, cpu);
3916 ring_buffer_read_prepare_sync();
3917 ring_buffer_read_start(iter->buffer_iter[cpu]);
3918 tracing_iter_reset(iter, cpu);
3919 }
3920
3921 mutex_unlock(&trace_types_lock);
3922
3923 return iter;
3924
3925 fail:
3926 mutex_unlock(&trace_types_lock);
3927 kfree(iter->trace);
3928 kfree(iter->buffer_iter);
3929 release:
3930 seq_release_private(inode, file);
3931 return ERR_PTR(-ENOMEM);
3932 }
3933
3934 int tracing_open_generic(struct inode *inode, struct file *filp)
3935 {
3936 if (tracing_disabled)
3937 return -ENODEV;
3938
3939 filp->private_data = inode->i_private;
3940 return 0;
3941 }
3942
3943 bool tracing_is_disabled(void)
3944 {
3945 return (tracing_disabled) ? true: false;
3946 }
3947
3948 /*
3949 * Open and update trace_array ref count.
3950 * Must have the current trace_array passed to it.
3951 */
3952 static int tracing_open_generic_tr(struct inode *inode, struct file *filp)
3953 {
3954 struct trace_array *tr = inode->i_private;
3955
3956 if (tracing_disabled)
3957 return -ENODEV;
3958
3959 if (trace_array_get(tr) < 0)
3960 return -ENODEV;
3961
3962 filp->private_data = inode->i_private;
3963
3964 return 0;
3965 }
3966
3967 static int tracing_release(struct inode *inode, struct file *file)
3968 {
3969 struct trace_array *tr = inode->i_private;
3970 struct seq_file *m = file->private_data;
3971 struct trace_iterator *iter;
3972 int cpu;
3973
3974 if (!(file->f_mode & FMODE_READ)) {
3975 trace_array_put(tr);
3976 return 0;
3977 }
3978
3979 /* Writes do not use seq_file */
3980 iter = m->private;
3981 mutex_lock(&trace_types_lock);
3982
3983 for_each_tracing_cpu(cpu) {
3984 if (iter->buffer_iter[cpu])
3985 ring_buffer_read_finish(iter->buffer_iter[cpu]);
3986 }
3987
3988 if (iter->trace && iter->trace->close)
3989 iter->trace->close(iter);
3990
3991 if (!iter->snapshot)
3992 /* reenable tracing if it was previously enabled */
3993 tracing_start_tr(tr);
3994
3995 __trace_array_put(tr);
3996
3997 mutex_unlock(&trace_types_lock);
3998
3999 mutex_destroy(&iter->mutex);
4000 free_cpumask_var(iter->started);
4001 kfree(iter->trace);
4002 kfree(iter->buffer_iter);
4003 seq_release_private(inode, file);
4004
4005 return 0;
4006 }
4007
4008 static int tracing_release_generic_tr(struct inode *inode, struct file *file)
4009 {
4010 struct trace_array *tr = inode->i_private;
4011
4012 trace_array_put(tr);
4013 return 0;
4014 }
4015
4016 static int tracing_single_release_tr(struct inode *inode, struct file *file)
4017 {
4018 struct trace_array *tr = inode->i_private;
4019
4020 trace_array_put(tr);
4021
4022 return single_release(inode, file);
4023 }
4024
4025 static int tracing_open(struct inode *inode, struct file *file)
4026 {
4027 struct trace_array *tr = inode->i_private;
4028 struct trace_iterator *iter;
4029 int ret = 0;
4030
4031 if (trace_array_get(tr) < 0)
4032 return -ENODEV;
4033
4034 /* If this file was open for write, then erase contents */
4035 if ((file->f_mode & FMODE_WRITE) && (file->f_flags & O_TRUNC)) {
4036 int cpu = tracing_get_cpu(inode);
4037 struct trace_buffer *trace_buf = &tr->trace_buffer;
4038
4039 #ifdef CONFIG_TRACER_MAX_TRACE
4040 if (tr->current_trace->print_max)
4041 trace_buf = &tr->max_buffer;
4042 #endif
4043
4044 if (cpu == RING_BUFFER_ALL_CPUS)
4045 tracing_reset_online_cpus(trace_buf);
4046 else
4047 tracing_reset(trace_buf, cpu);
4048 }
4049
4050 if (file->f_mode & FMODE_READ) {
4051 iter = __tracing_open(inode, file, false);
4052 if (IS_ERR(iter))
4053 ret = PTR_ERR(iter);
4054 else if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
4055 iter->iter_flags |= TRACE_FILE_LAT_FMT;
4056 }
4057
4058 if (ret < 0)
4059 trace_array_put(tr);
4060
4061 return ret;
4062 }
4063
4064 /*
4065 * Some tracers are not suitable for instance buffers.
4066 * A tracer is always available for the global array (toplevel)
4067 * or if it explicitly states that it is.
4068 */
4069 static bool
4070 trace_ok_for_array(struct tracer *t, struct trace_array *tr)
4071 {
4072 return (tr->flags & TRACE_ARRAY_FL_GLOBAL) || t->allow_instances;
4073 }
4074
4075 /* Find the next tracer that this trace array may use */
4076 static struct tracer *
4077 get_tracer_for_array(struct trace_array *tr, struct tracer *t)
4078 {
4079 while (t && !trace_ok_for_array(t, tr))
4080 t = t->next;
4081
4082 return t;
4083 }
4084
4085 static void *
4086 t_next(struct seq_file *m, void *v, loff_t *pos)
4087 {
4088 struct trace_array *tr = m->private;
4089 struct tracer *t = v;
4090
4091 (*pos)++;
4092
4093 if (t)
4094 t = get_tracer_for_array(tr, t->next);
4095
4096 return t;
4097 }
4098
4099 static void *t_start(struct seq_file *m, loff_t *pos)
4100 {
4101 struct trace_array *tr = m->private;
4102 struct tracer *t;
4103 loff_t l = 0;
4104
4105 mutex_lock(&trace_types_lock);
4106
4107 t = get_tracer_for_array(tr, trace_types);
4108 for (; t && l < *pos; t = t_next(m, t, &l))
4109 ;
4110
4111 return t;
4112 }
4113
4114 static void t_stop(struct seq_file *m, void *p)
4115 {
4116 mutex_unlock(&trace_types_lock);
4117 }
4118
4119 static int t_show(struct seq_file *m, void *v)
4120 {
4121 struct tracer *t = v;
4122
4123 if (!t)
4124 return 0;
4125
4126 seq_puts(m, t->name);
4127 if (t->next)
4128 seq_putc(m, ' ');
4129 else
4130 seq_putc(m, '\n');
4131
4132 return 0;
4133 }
4134
4135 static const struct seq_operations show_traces_seq_ops = {
4136 .start = t_start,
4137 .next = t_next,
4138 .stop = t_stop,
4139 .show = t_show,
4140 };
4141
4142 static int show_traces_open(struct inode *inode, struct file *file)
4143 {
4144 struct trace_array *tr = inode->i_private;
4145 struct seq_file *m;
4146 int ret;
4147
4148 if (tracing_disabled)
4149 return -ENODEV;
4150
4151 ret = seq_open(file, &show_traces_seq_ops);
4152 if (ret)
4153 return ret;
4154
4155 m = file->private_data;
4156 m->private = tr;
4157
4158 return 0;
4159 }
4160
4161 static ssize_t
4162 tracing_write_stub(struct file *filp, const char __user *ubuf,
4163 size_t count, loff_t *ppos)
4164 {
4165 return count;
4166 }
4167
4168 loff_t tracing_lseek(struct file *file, loff_t offset, int whence)
4169 {
4170 int ret;
4171
4172 if (file->f_mode & FMODE_READ)
4173 ret = seq_lseek(file, offset, whence);
4174 else
4175 file->f_pos = ret = 0;
4176
4177 return ret;
4178 }
4179
4180 static const struct file_operations tracing_fops = {
4181 .open = tracing_open,
4182 .read = seq_read,
4183 .write = tracing_write_stub,
4184 .llseek = tracing_lseek,
4185 .release = tracing_release,
4186 };
4187
4188 static const struct file_operations show_traces_fops = {
4189 .open = show_traces_open,
4190 .read = seq_read,
4191 .release = seq_release,
4192 .llseek = seq_lseek,
4193 };
4194
4195 static ssize_t
4196 tracing_cpumask_read(struct file *filp, char __user *ubuf,
4197 size_t count, loff_t *ppos)
4198 {
4199 struct trace_array *tr = file_inode(filp)->i_private;
4200 char *mask_str;
4201 int len;
4202
4203 len = snprintf(NULL, 0, "%*pb\n",
4204 cpumask_pr_args(tr->tracing_cpumask)) + 1;
4205 mask_str = kmalloc(len, GFP_KERNEL);
4206 if (!mask_str)
4207 return -ENOMEM;
4208
4209 len = snprintf(mask_str, len, "%*pb\n",
4210 cpumask_pr_args(tr->tracing_cpumask));
4211 if (len >= count) {
4212 count = -EINVAL;
4213 goto out_err;
4214 }
4215 count = simple_read_from_buffer(ubuf, count, ppos, mask_str, len);
4216
4217 out_err:
4218 kfree(mask_str);
4219
4220 return count;
4221 }
4222
4223 static ssize_t
4224 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
4225 size_t count, loff_t *ppos)
4226 {
4227 struct trace_array *tr = file_inode(filp)->i_private;
4228 cpumask_var_t tracing_cpumask_new;
4229 int err, cpu;
4230
4231 if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
4232 return -ENOMEM;
4233
4234 err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
4235 if (err)
4236 goto err_unlock;
4237
4238 local_irq_disable();
4239 arch_spin_lock(&tr->max_lock);
4240 for_each_tracing_cpu(cpu) {
4241 /*
4242 * Increase/decrease the disabled counter if we are
4243 * about to flip a bit in the cpumask:
4244 */
4245 if (cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4246 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4247 atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4248 ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu);
4249 }
4250 if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) &&
4251 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
4252 atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled);
4253 ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu);
4254 }
4255 }
4256 arch_spin_unlock(&tr->max_lock);
4257 local_irq_enable();
4258
4259 cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new);
4260 free_cpumask_var(tracing_cpumask_new);
4261
4262 return count;
4263
4264 err_unlock:
4265 free_cpumask_var(tracing_cpumask_new);
4266
4267 return err;
4268 }
4269
4270 static const struct file_operations tracing_cpumask_fops = {
4271 .open = tracing_open_generic_tr,
4272 .read = tracing_cpumask_read,
4273 .write = tracing_cpumask_write,
4274 .release = tracing_release_generic_tr,
4275 .llseek = generic_file_llseek,
4276 };
4277
4278 static int tracing_trace_options_show(struct seq_file *m, void *v)
4279 {
4280 struct tracer_opt *trace_opts;
4281 struct trace_array *tr = m->private;
4282 u32 tracer_flags;
4283 int i;
4284
4285 mutex_lock(&trace_types_lock);
4286 tracer_flags = tr->current_trace->flags->val;
4287 trace_opts = tr->current_trace->flags->opts;
4288
4289 for (i = 0; trace_options[i]; i++) {
4290 if (tr->trace_flags & (1 << i))
4291 seq_printf(m, "%s\n", trace_options[i]);
4292 else
4293 seq_printf(m, "no%s\n", trace_options[i]);
4294 }
4295
4296 for (i = 0; trace_opts[i].name; i++) {
4297 if (tracer_flags & trace_opts[i].bit)
4298 seq_printf(m, "%s\n", trace_opts[i].name);
4299 else
4300 seq_printf(m, "no%s\n", trace_opts[i].name);
4301 }
4302 mutex_unlock(&trace_types_lock);
4303
4304 return 0;
4305 }
4306
4307 static int __set_tracer_option(struct trace_array *tr,
4308 struct tracer_flags *tracer_flags,
4309 struct tracer_opt *opts, int neg)
4310 {
4311 struct tracer *trace = tracer_flags->trace;
4312 int ret;
4313
4314 ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);
4315 if (ret)
4316 return ret;
4317
4318 if (neg)
4319 tracer_flags->val &= ~opts->bit;
4320 else
4321 tracer_flags->val |= opts->bit;
4322 return 0;
4323 }
4324
4325 /* Try to assign a tracer specific option */
4326 static int set_tracer_option(struct trace_array *tr, char *cmp, int neg)
4327 {
4328 struct tracer *trace = tr->current_trace;
4329 struct tracer_flags *tracer_flags = trace->flags;
4330 struct tracer_opt *opts = NULL;
4331 int i;
4332
4333 for (i = 0; tracer_flags->opts[i].name; i++) {
4334 opts = &tracer_flags->opts[i];
4335
4336 if (strcmp(cmp, opts->name) == 0)
4337 return __set_tracer_option(tr, trace->flags, opts, neg);
4338 }
4339
4340 return -EINVAL;
4341 }
4342
4343 /* Some tracers require overwrite to stay enabled */
4344 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
4345 {
4346 if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set)
4347 return -1;
4348
4349 return 0;
4350 }
4351
4352 int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
4353 {
4354 /* do nothing if flag is already set */
4355 if (!!(tr->trace_flags & mask) == !!enabled)
4356 return 0;
4357
4358 /* Give the tracer a chance to approve the change */
4359 if (tr->current_trace->flag_changed)
4360 if (tr->current_trace->flag_changed(tr, mask, !!enabled))
4361 return -EINVAL;
4362
4363 if (enabled)
4364 tr->trace_flags |= mask;
4365 else
4366 tr->trace_flags &= ~mask;
4367
4368 if (mask == TRACE_ITER_RECORD_CMD)
4369 trace_event_enable_cmd_record(enabled);
4370
4371 if (mask == TRACE_ITER_RECORD_TGID) {
4372 if (!tgid_map)
4373 tgid_map = kcalloc(PID_MAX_DEFAULT + 1,
4374 sizeof(*tgid_map),
4375 GFP_KERNEL);
4376 if (!tgid_map) {
4377 tr->trace_flags &= ~TRACE_ITER_RECORD_TGID;
4378 return -ENOMEM;
4379 }
4380
4381 trace_event_enable_tgid_record(enabled);
4382 }
4383
4384 if (mask == TRACE_ITER_EVENT_FORK)
4385 trace_event_follow_fork(tr, enabled);
4386
4387 if (mask == TRACE_ITER_FUNC_FORK)
4388 ftrace_pid_follow_fork(tr, enabled);
4389
4390 if (mask == TRACE_ITER_OVERWRITE) {
4391 ring_buffer_change_overwrite(tr->trace_buffer.buffer, enabled);
4392 #ifdef CONFIG_TRACER_MAX_TRACE
4393 ring_buffer_change_overwrite(tr->max_buffer.buffer, enabled);
4394 #endif
4395 }
4396
4397 if (mask == TRACE_ITER_PRINTK) {
4398 trace_printk_start_stop_comm(enabled);
4399 trace_printk_control(enabled);
4400 }
4401
4402 return 0;
4403 }
4404
4405 static int trace_set_options(struct trace_array *tr, char *option)
4406 {
4407 char *cmp;
4408 int neg = 0;
4409 int ret;
4410 size_t orig_len = strlen(option);
4411
4412 cmp = strstrip(option);
4413
4414 if (strncmp(cmp, "no", 2) == 0) {
4415 neg = 1;
4416 cmp += 2;
4417 }
4418
4419 mutex_lock(&trace_types_lock);
4420
4421 ret = match_string(trace_options, -1, cmp);
4422 /* If no option could be set, test the specific tracer options */
4423 if (ret < 0)
4424 ret = set_tracer_option(tr, cmp, neg);
4425 else
4426 ret = set_tracer_flag(tr, 1 << ret, !neg);
4427
4428 mutex_unlock(&trace_types_lock);
4429
4430 /*
4431 * If the first trailing whitespace is replaced with '\0' by strstrip,
4432 * turn it back into a space.
4433 */
4434 if (orig_len > strlen(option))
4435 option[strlen(option)] = ' ';
4436
4437 return ret;
4438 }
4439
4440 static void __init apply_trace_boot_options(void)
4441 {
4442 char *buf = trace_boot_options_buf;
4443 char *option;
4444
4445 while (true) {
4446 option = strsep(&buf, ",");
4447
4448 if (!option)
4449 break;
4450
4451 if (*option)
4452 trace_set_options(&global_trace, option);
4453
4454 /* Put back the comma to allow this to be called again */
4455 if (buf)
4456 *(buf - 1) = ',';
4457 }
4458 }
4459
4460 static ssize_t
4461 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
4462 size_t cnt, loff_t *ppos)
4463 {
4464 struct seq_file *m = filp->private_data;
4465 struct trace_array *tr = m->private;
4466 char buf[64];
4467 int ret;
4468
4469 if (cnt >= sizeof(buf))
4470 return -EINVAL;
4471
4472 if (copy_from_user(buf, ubuf, cnt))
4473 return -EFAULT;
4474
4475 buf[cnt] = 0;
4476
4477 ret = trace_set_options(tr, buf);
4478 if (ret < 0)
4479 return ret;
4480
4481 *ppos += cnt;
4482
4483 return cnt;
4484 }
4485
4486 static int tracing_trace_options_open(struct inode *inode, struct file *file)
4487 {
4488 struct trace_array *tr = inode->i_private;
4489 int ret;
4490
4491 if (tracing_disabled)
4492 return -ENODEV;
4493
4494 if (trace_array_get(tr) < 0)
4495 return -ENODEV;
4496
4497 ret = single_open(file, tracing_trace_options_show, inode->i_private);
4498 if (ret < 0)
4499 trace_array_put(tr);
4500
4501 return ret;
4502 }
4503
4504 static const struct file_operations tracing_iter_fops = {
4505 .open = tracing_trace_options_open,
4506 .read = seq_read,
4507 .llseek = seq_lseek,
4508 .release = tracing_single_release_tr,
4509 .write = tracing_trace_options_write,
4510 };
4511
4512 static const char readme_msg[] =
4513 "tracing mini-HOWTO:\n\n"
4514 "# echo 0 > tracing_on : quick way to disable tracing\n"
4515 "# echo 1 > tracing_on : quick way to re-enable tracing\n\n"
4516 " Important files:\n"
4517 " trace\t\t\t- The static contents of the buffer\n"
4518 "\t\t\t To clear the buffer write into this file: echo > trace\n"
4519 " trace_pipe\t\t- A consuming read to see the contents of the buffer\n"
4520 " current_tracer\t- function and latency tracers\n"
4521 " available_tracers\t- list of configured tracers for current_tracer\n"
4522 " buffer_size_kb\t- view and modify size of per cpu buffer\n"
4523 " buffer_total_size_kb - view total size of all cpu buffers\n\n"
4524 " trace_clock\t\t-change the clock used to order events\n"
4525 " local: Per cpu clock but may not be synced across CPUs\n"
4526 " global: Synced across CPUs but slows tracing down.\n"
4527 " counter: Not a clock, but just an increment\n"
4528 " uptime: Jiffy counter from time of boot\n"
4529 " perf: Same clock that perf events use\n"
4530 #ifdef CONFIG_X86_64
4531 " x86-tsc: TSC cycle counter\n"
4532 #endif
4533 "\n timestamp_mode\t-view the mode used to timestamp events\n"
4534 " delta: Delta difference against a buffer-wide timestamp\n"
4535 " absolute: Absolute (standalone) timestamp\n"
4536 "\n trace_marker\t\t- Writes into this file writes into the kernel buffer\n"
4537 "\n trace_marker_raw\t\t- Writes into this file writes binary data into the kernel buffer\n"
4538 " tracing_cpumask\t- Limit which CPUs to trace\n"
4539 " instances\t\t- Make sub-buffers with: mkdir instances/foo\n"
4540 "\t\t\t Remove sub-buffer with rmdir\n"
4541 " trace_options\t\t- Set format or modify how tracing happens\n"
4542 "\t\t\t Disable an option by adding a suffix 'no' to the\n"
4543 "\t\t\t option name\n"
4544 " saved_cmdlines_size\t- echo command number in here to store comm-pid list\n"
4545 #ifdef CONFIG_DYNAMIC_FTRACE
4546 "\n available_filter_functions - list of functions that can be filtered on\n"
4547 " set_ftrace_filter\t- echo function name in here to only trace these\n"
4548 "\t\t\t functions\n"
4549 "\t accepts: func_full_name or glob-matching-pattern\n"
4550 "\t modules: Can select a group via module\n"
4551 "\t Format: :mod:<module-name>\n"
4552 "\t example: echo :mod:ext3 > set_ftrace_filter\n"
4553 "\t triggers: a command to perform when function is hit\n"
4554 "\t Format: <function>:<trigger>[:count]\n"
4555 "\t trigger: traceon, traceoff\n"
4556 "\t\t enable_event:<system>:<event>\n"
4557 "\t\t disable_event:<system>:<event>\n"
4558 #ifdef CONFIG_STACKTRACE
4559 "\t\t stacktrace\n"
4560 #endif
4561 #ifdef CONFIG_TRACER_SNAPSHOT
4562 "\t\t snapshot\n"
4563 #endif
4564 "\t\t dump\n"
4565 "\t\t cpudump\n"
4566 "\t example: echo do_fault:traceoff > set_ftrace_filter\n"
4567 "\t echo do_trap:traceoff:3 > set_ftrace_filter\n"
4568 "\t The first one will disable tracing every time do_fault is hit\n"
4569 "\t The second will disable tracing at most 3 times when do_trap is hit\n"
4570 "\t The first time do trap is hit and it disables tracing, the\n"
4571 "\t counter will decrement to 2. If tracing is already disabled,\n"
4572 "\t the counter will not decrement. It only decrements when the\n"
4573 "\t trigger did work\n"
4574 "\t To remove trigger without count:\n"
4575 "\t echo '!<function>:<trigger> > set_ftrace_filter\n"
4576 "\t To remove trigger with a count:\n"
4577 "\t echo '!<function>:<trigger>:0 > set_ftrace_filter\n"
4578 " set_ftrace_notrace\t- echo function name in here to never trace.\n"
4579 "\t accepts: func_full_name, *func_end, func_begin*, *func_middle*\n"
4580 "\t modules: Can select a group via module command :mod:\n"
4581 "\t Does not accept triggers\n"
4582 #endif /* CONFIG_DYNAMIC_FTRACE */
4583 #ifdef CONFIG_FUNCTION_TRACER
4584 " set_ftrace_pid\t- Write pid(s) to only function trace those pids\n"
4585 "\t\t (function)\n"
4586 #endif
4587 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
4588 " set_graph_function\t- Trace the nested calls of a function (function_graph)\n"
4589 " set_graph_notrace\t- Do not trace the nested calls of a function (function_graph)\n"
4590 " max_graph_depth\t- Trace a limited depth of nested calls (0 is unlimited)\n"
4591 #endif
4592 #ifdef CONFIG_TRACER_SNAPSHOT
4593 "\n snapshot\t\t- Like 'trace' but shows the content of the static\n"
4594 "\t\t\t snapshot buffer. Read the contents for more\n"
4595 "\t\t\t information\n"
4596 #endif
4597 #ifdef CONFIG_STACK_TRACER
4598 " stack_trace\t\t- Shows the max stack trace when active\n"
4599 " stack_max_size\t- Shows current max stack size that was traced\n"
4600 "\t\t\t Write into this file to reset the max size (trigger a\n"
4601 "\t\t\t new trace)\n"
4602 #ifdef CONFIG_DYNAMIC_FTRACE
4603 " stack_trace_filter\t- Like set_ftrace_filter but limits what stack_trace\n"
4604 "\t\t\t traces\n"
4605 #endif
4606 #endif /* CONFIG_STACK_TRACER */
4607 #ifdef CONFIG_DYNAMIC_EVENTS
4608 " dynamic_events\t\t- Add/remove/show the generic dynamic events\n"
4609 "\t\t\t Write into this file to define/undefine new trace events.\n"
4610 #endif
4611 #ifdef CONFIG_KPROBE_EVENTS
4612 " kprobe_events\t\t- Add/remove/show the kernel dynamic events\n"
4613 "\t\t\t Write into this file to define/undefine new trace events.\n"
4614 #endif
4615 #ifdef CONFIG_UPROBE_EVENTS
4616 " uprobe_events\t\t- Add/remove/show the userspace dynamic events\n"
4617 "\t\t\t Write into this file to define/undefine new trace events.\n"
4618 #endif
4619 #if defined(CONFIG_KPROBE_EVENTS) || defined(CONFIG_UPROBE_EVENTS)
4620 "\t accepts: event-definitions (one definition per line)\n"
4621 "\t Format: p[:[<group>/]<event>] <place> [<args>]\n"
4622 "\t r[maxactive][:[<group>/]<event>] <place> [<args>]\n"
4623 "\t -:[<group>/]<event>\n"
4624 #ifdef CONFIG_KPROBE_EVENTS
4625 "\t place: [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4626 "place (kretprobe): [<module>:]<symbol>[+<offset>]|<memaddr>\n"
4627 #endif
4628 #ifdef CONFIG_UPROBE_EVENTS
4629 " place (uprobe): <path>:<offset>[(ref_ctr_offset)]\n"
4630 #endif
4631 "\t args: <name>=fetcharg[:type]\n"
4632 "\t fetcharg: %<register>, @<address>, @<symbol>[+|-<offset>],\n"
4633 #ifdef CONFIG_HAVE_FUNCTION_ARG_ACCESS_API
4634 "\t $stack<index>, $stack, $retval, $comm, $arg<N>\n"
4635 #else
4636 "\t $stack<index>, $stack, $retval, $comm\n"
4637 #endif
4638 "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n"
4639 "\t b<bit-width>@<bit-offset>/<container-size>,\n"
4640 "\t <type>\\[<array-size>\\]\n"
4641 #endif
4642 " events/\t\t- Directory containing all trace event subsystems:\n"
4643 " enable\t\t- Write 0/1 to enable/disable tracing of all events\n"
4644 " events/<system>/\t- Directory containing all trace events for <system>:\n"
4645 " enable\t\t- Write 0/1 to enable/disable tracing of all <system>\n"
4646 "\t\t\t events\n"
4647 " filter\t\t- If set, only events passing filter are traced\n"
4648 " events/<system>/<event>/\t- Directory containing control files for\n"
4649 "\t\t\t <event>:\n"
4650 " enable\t\t- Write 0/1 to enable/disable tracing of <event>\n"
4651 " filter\t\t- If set, only events passing filter are traced\n"
4652 " trigger\t\t- If set, a command to perform when event is hit\n"
4653 "\t Format: <trigger>[:count][if <filter>]\n"
4654 "\t trigger: traceon, traceoff\n"
4655 "\t enable_event:<system>:<event>\n"
4656 "\t disable_event:<system>:<event>\n"
4657 #ifdef CONFIG_HIST_TRIGGERS
4658 "\t enable_hist:<system>:<event>\n"
4659 "\t disable_hist:<system>:<event>\n"
4660 #endif
4661 #ifdef CONFIG_STACKTRACE
4662 "\t\t stacktrace\n"
4663 #endif
4664 #ifdef CONFIG_TRACER_SNAPSHOT
4665 "\t\t snapshot\n"
4666 #endif
4667 #ifdef CONFIG_HIST_TRIGGERS
4668 "\t\t hist (see below)\n"
4669 #endif
4670 "\t example: echo traceoff > events/block/block_unplug/trigger\n"
4671 "\t echo traceoff:3 > events/block/block_unplug/trigger\n"
4672 "\t echo 'enable_event:kmem:kmalloc:3 if nr_rq > 1' > \\\n"
4673 "\t events/block/block_unplug/trigger\n"
4674 "\t The first disables tracing every time block_unplug is hit.\n"
4675 "\t The second disables tracing the first 3 times block_unplug is hit.\n"
4676 "\t The third enables the kmalloc event the first 3 times block_unplug\n"
4677 "\t is hit and has value of greater than 1 for the 'nr_rq' event field.\n"
4678 "\t Like function triggers, the counter is only decremented if it\n"
4679 "\t enabled or disabled tracing.\n"
4680 "\t To remove a trigger without a count:\n"
4681 "\t echo '!<trigger> > <system>/<event>/trigger\n"
4682 "\t To remove a trigger with a count:\n"
4683 "\t echo '!<trigger>:0 > <system>/<event>/trigger\n"
4684 "\t Filters can be ignored when removing a trigger.\n"
4685 #ifdef CONFIG_HIST_TRIGGERS
4686 " hist trigger\t- If set, event hits are aggregated into a hash table\n"
4687 "\t Format: hist:keys=<field1[,field2,...]>\n"
4688 "\t [:values=<field1[,field2,...]>]\n"
4689 "\t [:sort=<field1[,field2,...]>]\n"
4690 "\t [:size=#entries]\n"
4691 "\t [:pause][:continue][:clear]\n"
4692 "\t [:name=histname1]\n"
4693 "\t [if <filter>]\n\n"
4694 "\t When a matching event is hit, an entry is added to a hash\n"
4695 "\t table using the key(s) and value(s) named, and the value of a\n"
4696 "\t sum called 'hitcount' is incremented. Keys and values\n"
4697 "\t correspond to fields in the event's format description. Keys\n"
4698 "\t can be any field, or the special string 'stacktrace'.\n"
4699 "\t Compound keys consisting of up to two fields can be specified\n"
4700 "\t by the 'keys' keyword. Values must correspond to numeric\n"
4701 "\t fields. Sort keys consisting of up to two fields can be\n"
4702 "\t specified using the 'sort' keyword. The sort direction can\n"
4703 "\t be modified by appending '.descending' or '.ascending' to a\n"
4704 "\t sort field. The 'size' parameter can be used to specify more\n"
4705 "\t or fewer than the default 2048 entries for the hashtable size.\n"
4706 "\t If a hist trigger is given a name using the 'name' parameter,\n"
4707 "\t its histogram data will be shared with other triggers of the\n"
4708 "\t same name, and trigger hits will update this common data.\n\n"
4709 "\t Reading the 'hist' file for the event will dump the hash\n"
4710 "\t table in its entirety to stdout. If there are multiple hist\n"
4711 "\t triggers attached to an event, there will be a table for each\n"
4712 "\t trigger in the output. The table displayed for a named\n"
4713 "\t trigger will be the same as any other instance having the\n"
4714 "\t same name. The default format used to display a given field\n"
4715 "\t can be modified by appending any of the following modifiers\n"
4716 "\t to the field name, as applicable:\n\n"
4717 "\t .hex display a number as a hex value\n"
4718 "\t .sym display an address as a symbol\n"
4719 "\t .sym-offset display an address as a symbol and offset\n"
4720 "\t .execname display a common_pid as a program name\n"
4721 "\t .syscall display a syscall id as a syscall name\n"
4722 "\t .log2 display log2 value rather than raw number\n"
4723 "\t .usecs display a common_timestamp in microseconds\n\n"
4724 "\t The 'pause' parameter can be used to pause an existing hist\n"
4725 "\t trigger or to start a hist trigger but not log any events\n"
4726 "\t until told to do so. 'continue' can be used to start or\n"
4727 "\t restart a paused hist trigger.\n\n"
4728 "\t The 'clear' parameter will clear the contents of a running\n"
4729 "\t hist trigger and leave its current paused/active state\n"
4730 "\t unchanged.\n\n"
4731 "\t The enable_hist and disable_hist triggers can be used to\n"
4732 "\t have one event conditionally start and stop another event's\n"
4733 "\t already-attached hist trigger. The syntax is analagous to\n"
4734 "\t the enable_event and disable_event triggers.\n"
4735 #endif
4736 ;
4737
4738 static ssize_t
4739 tracing_readme_read(struct file *filp, char __user *ubuf,
4740 size_t cnt, loff_t *ppos)
4741 {
4742 return simple_read_from_buffer(ubuf, cnt, ppos,
4743 readme_msg, strlen(readme_msg));
4744 }
4745
4746 static const struct file_operations tracing_readme_fops = {
4747 .open = tracing_open_generic,
4748 .read = tracing_readme_read,
4749 .llseek = generic_file_llseek,
4750 };
4751
4752 static void *saved_tgids_next(struct seq_file *m, void *v, loff_t *pos)
4753 {
4754 int *ptr = v;
4755
4756 if (*pos || m->count)
4757 ptr++;
4758
4759 (*pos)++;
4760
4761 for (; ptr <= &tgid_map[PID_MAX_DEFAULT]; ptr++) {
4762 if (trace_find_tgid(*ptr))
4763 return ptr;
4764 }
4765
4766 return NULL;
4767 }
4768
4769 static void *saved_tgids_start(struct seq_file *m, loff_t *pos)
4770 {
4771 void *v;
4772 loff_t l = 0;
4773
4774 if (!tgid_map)
4775 return NULL;
4776
4777 v = &tgid_map[0];
4778 while (l <= *pos) {
4779 v = saved_tgids_next(m, v, &l);
4780 if (!v)
4781 return NULL;
4782 }
4783
4784 return v;
4785 }
4786
4787 static void saved_tgids_stop(struct seq_file *m, void *v)
4788 {
4789 }
4790
4791 static int saved_tgids_show(struct seq_file *m, void *v)
4792 {
4793 int pid = (int *)v - tgid_map;
4794
4795 seq_printf(m, "%d %d\n", pid, trace_find_tgid(pid));
4796 return 0;
4797 }
4798
4799 static const struct seq_operations tracing_saved_tgids_seq_ops = {
4800 .start = saved_tgids_start,
4801 .stop = saved_tgids_stop,
4802 .next = saved_tgids_next,
4803 .show = saved_tgids_show,
4804 };
4805
4806 static int tracing_saved_tgids_open(struct inode *inode, struct file *filp)
4807 {
4808 if (tracing_disabled)
4809 return -ENODEV;
4810
4811 return seq_open(filp, &tracing_saved_tgids_seq_ops);
4812 }
4813
4814
4815 static const struct file_operations tracing_saved_tgids_fops = {
4816 .open = tracing_saved_tgids_open,
4817 .read = seq_read,
4818 .llseek = seq_lseek,
4819 .release = seq_release,
4820 };
4821
4822 static void *saved_cmdlines_next(struct seq_file *m, void *v, loff_t *pos)
4823 {
4824 unsigned int *ptr = v;
4825
4826 if (*pos || m->count)
4827 ptr++;
4828
4829 (*pos)++;
4830
4831 for (; ptr < &savedcmd->map_cmdline_to_pid[savedcmd->cmdline_num];
4832 ptr++) {
4833 if (*ptr == -1 || *ptr == NO_CMDLINE_MAP)
4834 continue;
4835
4836 return ptr;
4837 }
4838
4839 return NULL;
4840 }
4841
4842 static void *saved_cmdlines_start(struct seq_file *m, loff_t *pos)
4843 {
4844 void *v;
4845 loff_t l = 0;
4846
4847 preempt_disable();
4848 arch_spin_lock(&trace_cmdline_lock);
4849
4850 v = &savedcmd->map_cmdline_to_pid[0];
4851 while (l <= *pos) {
4852 v = saved_cmdlines_next(m, v, &l);
4853 if (!v)
4854 return NULL;
4855 }
4856
4857 return v;
4858 }
4859
4860 static void saved_cmdlines_stop(struct seq_file *m, void *v)
4861 {
4862 arch_spin_unlock(&trace_cmdline_lock);
4863 preempt_enable();
4864 }
4865
4866 static int saved_cmdlines_show(struct seq_file *m, void *v)
4867 {
4868 char buf[TASK_COMM_LEN];
4869 unsigned int *pid = v;
4870
4871 __trace_find_cmdline(*pid, buf);
4872 seq_printf(m, "%d %s\n", *pid, buf);
4873 return 0;
4874 }
4875
4876 static const struct seq_operations tracing_saved_cmdlines_seq_ops = {
4877 .start = saved_cmdlines_start,
4878 .next = saved_cmdlines_next,
4879 .stop = saved_cmdlines_stop,
4880 .show = saved_cmdlines_show,
4881 };
4882
4883 static int tracing_saved_cmdlines_open(struct inode *inode, struct file *filp)
4884 {
4885 if (tracing_disabled)
4886 return -ENODEV;
4887
4888 return seq_open(filp, &tracing_saved_cmdlines_seq_ops);
4889 }
4890
4891 static const struct file_operations tracing_saved_cmdlines_fops = {
4892 .open = tracing_saved_cmdlines_open,
4893 .read = seq_read,
4894 .llseek = seq_lseek,
4895 .release = seq_release,
4896 };
4897
4898 static ssize_t
4899 tracing_saved_cmdlines_size_read(struct file *filp, char __user *ubuf,
4900 size_t cnt, loff_t *ppos)
4901 {
4902 char buf[64];
4903 int r;
4904
4905 arch_spin_lock(&trace_cmdline_lock);
4906 r = scnprintf(buf, sizeof(buf), "%u\n", savedcmd->cmdline_num);
4907 arch_spin_unlock(&trace_cmdline_lock);
4908
4909 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4910 }
4911
4912 static void free_saved_cmdlines_buffer(struct saved_cmdlines_buffer *s)
4913 {
4914 kfree(s->saved_cmdlines);
4915 kfree(s->map_cmdline_to_pid);
4916 kfree(s);
4917 }
4918
4919 static int tracing_resize_saved_cmdlines(unsigned int val)
4920 {
4921 struct saved_cmdlines_buffer *s, *savedcmd_temp;
4922
4923 s = kmalloc(sizeof(*s), GFP_KERNEL);
4924 if (!s)
4925 return -ENOMEM;
4926
4927 if (allocate_cmdlines_buffer(val, s) < 0) {
4928 kfree(s);
4929 return -ENOMEM;
4930 }
4931
4932 arch_spin_lock(&trace_cmdline_lock);
4933 savedcmd_temp = savedcmd;
4934 savedcmd = s;
4935 arch_spin_unlock(&trace_cmdline_lock);
4936 free_saved_cmdlines_buffer(savedcmd_temp);
4937
4938 return 0;
4939 }
4940
4941 static ssize_t
4942 tracing_saved_cmdlines_size_write(struct file *filp, const char __user *ubuf,
4943 size_t cnt, loff_t *ppos)
4944 {
4945 unsigned long val;
4946 int ret;
4947
4948 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4949 if (ret)
4950 return ret;
4951
4952 /* must have at least 1 entry or less than PID_MAX_DEFAULT */
4953 if (!val || val > PID_MAX_DEFAULT)
4954 return -EINVAL;
4955
4956 ret = tracing_resize_saved_cmdlines((unsigned int)val);
4957 if (ret < 0)
4958 return ret;
4959
4960 *ppos += cnt;
4961
4962 return cnt;
4963 }
4964
4965 static const struct file_operations tracing_saved_cmdlines_size_fops = {
4966 .open = tracing_open_generic,
4967 .read = tracing_saved_cmdlines_size_read,
4968 .write = tracing_saved_cmdlines_size_write,
4969 };
4970
4971 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
4972 static union trace_eval_map_item *
4973 update_eval_map(union trace_eval_map_item *ptr)
4974 {
4975 if (!ptr->map.eval_string) {
4976 if (ptr->tail.next) {
4977 ptr = ptr->tail.next;
4978 /* Set ptr to the next real item (skip head) */
4979 ptr++;
4980 } else
4981 return NULL;
4982 }
4983 return ptr;
4984 }
4985
4986 static void *eval_map_next(struct seq_file *m, void *v, loff_t *pos)
4987 {
4988 union trace_eval_map_item *ptr = v;
4989
4990 /*
4991 * Paranoid! If ptr points to end, we don't want to increment past it.
4992 * This really should never happen.
4993 */
4994 ptr = update_eval_map(ptr);
4995 if (WARN_ON_ONCE(!ptr))
4996 return NULL;
4997
4998 ptr++;
4999
5000 (*pos)++;
5001
5002 ptr = update_eval_map(ptr);
5003
5004 return ptr;
5005 }
5006
5007 static void *eval_map_start(struct seq_file *m, loff_t *pos)
5008 {
5009 union trace_eval_map_item *v;
5010 loff_t l = 0;
5011
5012 mutex_lock(&trace_eval_mutex);
5013
5014 v = trace_eval_maps;
5015 if (v)
5016 v++;
5017
5018 while (v && l < *pos) {
5019 v = eval_map_next(m, v, &l);
5020 }
5021
5022 return v;
5023 }
5024
5025 static void eval_map_stop(struct seq_file *m, void *v)
5026 {
5027 mutex_unlock(&trace_eval_mutex);
5028 }
5029
5030 static int eval_map_show(struct seq_file *m, void *v)
5031 {
5032 union trace_eval_map_item *ptr = v;
5033
5034 seq_printf(m, "%s %ld (%s)\n",
5035 ptr->map.eval_string, ptr->map.eval_value,
5036 ptr->map.system);
5037
5038 return 0;
5039 }
5040
5041 static const struct seq_operations tracing_eval_map_seq_ops = {
5042 .start = eval_map_start,
5043 .next = eval_map_next,
5044 .stop = eval_map_stop,
5045 .show = eval_map_show,
5046 };
5047
5048 static int tracing_eval_map_open(struct inode *inode, struct file *filp)
5049 {
5050 if (tracing_disabled)
5051 return -ENODEV;
5052
5053 return seq_open(filp, &tracing_eval_map_seq_ops);
5054 }
5055
5056 static const struct file_operations tracing_eval_map_fops = {
5057 .open = tracing_eval_map_open,
5058 .read = seq_read,
5059 .llseek = seq_lseek,
5060 .release = seq_release,
5061 };
5062
5063 static inline union trace_eval_map_item *
5064 trace_eval_jmp_to_tail(union trace_eval_map_item *ptr)
5065 {
5066 /* Return tail of array given the head */
5067 return ptr + ptr->head.length + 1;
5068 }
5069
5070 static void
5071 trace_insert_eval_map_file(struct module *mod, struct trace_eval_map **start,
5072 int len)
5073 {
5074 struct trace_eval_map **stop;
5075 struct trace_eval_map **map;
5076 union trace_eval_map_item *map_array;
5077 union trace_eval_map_item *ptr;
5078
5079 stop = start + len;
5080
5081 /*
5082 * The trace_eval_maps contains the map plus a head and tail item,
5083 * where the head holds the module and length of array, and the
5084 * tail holds a pointer to the next list.
5085 */
5086 map_array = kmalloc_array(len + 2, sizeof(*map_array), GFP_KERNEL);
5087 if (!map_array) {
5088 pr_warn("Unable to allocate trace eval mapping\n");
5089 return;
5090 }
5091
5092 mutex_lock(&trace_eval_mutex);
5093
5094 if (!trace_eval_maps)
5095 trace_eval_maps = map_array;
5096 else {
5097 ptr = trace_eval_maps;
5098 for (;;) {
5099 ptr = trace_eval_jmp_to_tail(ptr);
5100 if (!ptr->tail.next)
5101 break;
5102 ptr = ptr->tail.next;
5103
5104 }
5105 ptr->tail.next = map_array;
5106 }
5107 map_array->head.mod = mod;
5108 map_array->head.length = len;
5109 map_array++;
5110
5111 for (map = start; (unsigned long)map < (unsigned long)stop; map++) {
5112 map_array->map = **map;
5113 map_array++;
5114 }
5115 memset(map_array, 0, sizeof(*map_array));
5116
5117 mutex_unlock(&trace_eval_mutex);
5118 }
5119
5120 static void trace_create_eval_file(struct dentry *d_tracer)
5121 {
5122 trace_create_file("eval_map", 0444, d_tracer,
5123 NULL, &tracing_eval_map_fops);
5124 }
5125
5126 #else /* CONFIG_TRACE_EVAL_MAP_FILE */
5127 static inline void trace_create_eval_file(struct dentry *d_tracer) { }
5128 static inline void trace_insert_eval_map_file(struct module *mod,
5129 struct trace_eval_map **start, int len) { }
5130 #endif /* !CONFIG_TRACE_EVAL_MAP_FILE */
5131
5132 static void trace_insert_eval_map(struct module *mod,
5133 struct trace_eval_map **start, int len)
5134 {
5135 struct trace_eval_map **map;
5136
5137 if (len <= 0)
5138 return;
5139
5140 map = start;
5141
5142 trace_event_eval_update(map, len);
5143
5144 trace_insert_eval_map_file(mod, start, len);
5145 }
5146
5147 static ssize_t
5148 tracing_set_trace_read(struct file *filp, char __user *ubuf,
5149 size_t cnt, loff_t *ppos)
5150 {
5151 struct trace_array *tr = filp->private_data;
5152 char buf[MAX_TRACER_SIZE+2];
5153 int r;
5154
5155 mutex_lock(&trace_types_lock);
5156 r = sprintf(buf, "%s\n", tr->current_trace->name);
5157 mutex_unlock(&trace_types_lock);
5158
5159 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5160 }
5161
5162 int tracer_init(struct tracer *t, struct trace_array *tr)
5163 {
5164 tracing_reset_online_cpus(&tr->trace_buffer);
5165 return t->init(tr);
5166 }
5167
5168 static void set_buffer_entries(struct trace_buffer *buf, unsigned long val)
5169 {
5170 int cpu;
5171
5172 for_each_tracing_cpu(cpu)
5173 per_cpu_ptr(buf->data, cpu)->entries = val;
5174 }
5175
5176 #ifdef CONFIG_TRACER_MAX_TRACE
5177 /* resize @tr's buffer to the size of @size_tr's entries */
5178 static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
5179 struct trace_buffer *size_buf, int cpu_id)
5180 {
5181 int cpu, ret = 0;
5182
5183 if (cpu_id == RING_BUFFER_ALL_CPUS) {
5184 for_each_tracing_cpu(cpu) {
5185 ret = ring_buffer_resize(trace_buf->buffer,
5186 per_cpu_ptr(size_buf->data, cpu)->entries, cpu);
5187 if (ret < 0)
5188 break;
5189 per_cpu_ptr(trace_buf->data, cpu)->entries =
5190 per_cpu_ptr(size_buf->data, cpu)->entries;
5191 }
5192 } else {
5193 ret = ring_buffer_resize(trace_buf->buffer,
5194 per_cpu_ptr(size_buf->data, cpu_id)->entries, cpu_id);
5195 if (ret == 0)
5196 per_cpu_ptr(trace_buf->data, cpu_id)->entries =
5197 per_cpu_ptr(size_buf->data, cpu_id)->entries;
5198 }
5199
5200 return ret;
5201 }
5202 #endif /* CONFIG_TRACER_MAX_TRACE */
5203
5204 static int __tracing_resize_ring_buffer(struct trace_array *tr,
5205 unsigned long size, int cpu)
5206 {
5207 int ret;
5208
5209 /*
5210 * If kernel or user changes the size of the ring buffer
5211 * we use the size that was given, and we can forget about
5212 * expanding it later.
5213 */
5214 ring_buffer_expanded = true;
5215
5216 /* May be called before buffers are initialized */
5217 if (!tr->trace_buffer.buffer)
5218 return 0;
5219
5220 ret = ring_buffer_resize(tr->trace_buffer.buffer, size, cpu);
5221 if (ret < 0)
5222 return ret;
5223
5224 #ifdef CONFIG_TRACER_MAX_TRACE
5225 if (!(tr->flags & TRACE_ARRAY_FL_GLOBAL) ||
5226 !tr->current_trace->use_max_tr)
5227 goto out;
5228
5229 ret = ring_buffer_resize(tr->max_buffer.buffer, size, cpu);
5230 if (ret < 0) {
5231 int r = resize_buffer_duplicate_size(&tr->trace_buffer,
5232 &tr->trace_buffer, cpu);
5233 if (r < 0) {
5234 /*
5235 * AARGH! We are left with different
5236 * size max buffer!!!!
5237 * The max buffer is our "snapshot" buffer.
5238 * When a tracer needs a snapshot (one of the
5239 * latency tracers), it swaps the max buffer
5240 * with the saved snap shot. We succeeded to
5241 * update the size of the main buffer, but failed to
5242 * update the size of the max buffer. But when we tried
5243 * to reset the main buffer to the original size, we
5244 * failed there too. This is very unlikely to
5245 * happen, but if it does, warn and kill all
5246 * tracing.
5247 */
5248 WARN_ON(1);
5249 tracing_disabled = 1;
5250 }
5251 return ret;
5252 }
5253
5254 if (cpu == RING_BUFFER_ALL_CPUS)
5255 set_buffer_entries(&tr->max_buffer, size);
5256 else
5257 per_cpu_ptr(tr->max_buffer.data, cpu)->entries = size;
5258
5259 out:
5260 #endif /* CONFIG_TRACER_MAX_TRACE */
5261
5262 if (cpu == RING_BUFFER_ALL_CPUS)
5263 set_buffer_entries(&tr->trace_buffer, size);
5264 else
5265 per_cpu_ptr(tr->trace_buffer.data, cpu)->entries = size;
5266
5267 return ret;
5268 }
5269
5270 static ssize_t tracing_resize_ring_buffer(struct trace_array *tr,
5271 unsigned long size, int cpu_id)
5272 {
5273 int ret = size;
5274
5275 mutex_lock(&trace_types_lock);
5276
5277 if (cpu_id != RING_BUFFER_ALL_CPUS) {
5278 /* make sure, this cpu is enabled in the mask */
5279 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
5280 ret = -EINVAL;
5281 goto out;
5282 }
5283 }
5284
5285 ret = __tracing_resize_ring_buffer(tr, size, cpu_id);
5286 if (ret < 0)
5287 ret = -ENOMEM;
5288
5289 out:
5290 mutex_unlock(&trace_types_lock);
5291
5292 return ret;
5293 }
5294
5295
5296 /**
5297 * tracing_update_buffers - used by tracing facility to expand ring buffers
5298 *
5299 * To save on memory when the tracing is never used on a system with it
5300 * configured in. The ring buffers are set to a minimum size. But once
5301 * a user starts to use the tracing facility, then they need to grow
5302 * to their default size.
5303 *
5304 * This function is to be called when a tracer is about to be used.
5305 */
5306 int tracing_update_buffers(void)
5307 {
5308 int ret = 0;
5309
5310 mutex_lock(&trace_types_lock);
5311 if (!ring_buffer_expanded)
5312 ret = __tracing_resize_ring_buffer(&global_trace, trace_buf_size,
5313 RING_BUFFER_ALL_CPUS);
5314 mutex_unlock(&trace_types_lock);
5315
5316 return ret;
5317 }
5318
5319 struct trace_option_dentry;
5320
5321 static void
5322 create_trace_option_files(struct trace_array *tr, struct tracer *tracer);
5323
5324 /*
5325 * Used to clear out the tracer before deletion of an instance.
5326 * Must have trace_types_lock held.
5327 */
5328 static void tracing_set_nop(struct trace_array *tr)
5329 {
5330 if (tr->current_trace == &nop_trace)
5331 return;
5332
5333 tr->current_trace->enabled--;
5334
5335 if (tr->current_trace->reset)
5336 tr->current_trace->reset(tr);
5337
5338 tr->current_trace = &nop_trace;
5339 }
5340
5341 static void add_tracer_options(struct trace_array *tr, struct tracer *t)
5342 {
5343 /* Only enable if the directory has been created already. */
5344 if (!tr->dir)
5345 return;
5346
5347 create_trace_option_files(tr, t);
5348 }
5349
5350 static int tracing_set_tracer(struct trace_array *tr, const char *buf)
5351 {
5352 struct tracer *t;
5353 #ifdef CONFIG_TRACER_MAX_TRACE
5354 bool had_max_tr;
5355 #endif
5356 int ret = 0;
5357
5358 mutex_lock(&trace_types_lock);
5359
5360 if (!ring_buffer_expanded) {
5361 ret = __tracing_resize_ring_buffer(tr, trace_buf_size,
5362 RING_BUFFER_ALL_CPUS);
5363 if (ret < 0)
5364 goto out;
5365 ret = 0;
5366 }
5367
5368 for (t = trace_types; t; t = t->next) {
5369 if (strcmp(t->name, buf) == 0)
5370 break;
5371 }
5372 if (!t) {
5373 ret = -EINVAL;
5374 goto out;
5375 }
5376 if (t == tr->current_trace)
5377 goto out;
5378
5379 /* Some tracers won't work on kernel command line */
5380 if (system_state < SYSTEM_RUNNING && t->noboot) {
5381 pr_warn("Tracer '%s' is not allowed on command line, ignored\n",
5382 t->name);
5383 goto out;
5384 }
5385
5386 /* Some tracers are only allowed for the top level buffer */
5387 if (!trace_ok_for_array(t, tr)) {
5388 ret = -EINVAL;
5389 goto out;
5390 }
5391
5392 /* If trace pipe files are being read, we can't change the tracer */
5393 if (tr->current_trace->ref) {
5394 ret = -EBUSY;
5395 goto out;
5396 }
5397
5398 trace_branch_disable();
5399
5400 tr->current_trace->enabled--;
5401
5402 if (tr->current_trace->reset)
5403 tr->current_trace->reset(tr);
5404
5405 /* Current trace needs to be nop_trace before synchronize_sched */
5406 tr->current_trace = &nop_trace;
5407
5408 #ifdef CONFIG_TRACER_MAX_TRACE
5409 had_max_tr = tr->allocated_snapshot;
5410
5411 if (had_max_tr && !t->use_max_tr) {
5412 /*
5413 * We need to make sure that the update_max_tr sees that
5414 * current_trace changed to nop_trace to keep it from
5415 * swapping the buffers after we resize it.
5416 * The update_max_tr is called from interrupts disabled
5417 * so a synchronized_sched() is sufficient.
5418 */
5419 synchronize_sched();
5420 free_snapshot(tr);
5421 }
5422 #endif
5423
5424 #ifdef CONFIG_TRACER_MAX_TRACE
5425 if (t->use_max_tr && !had_max_tr) {
5426 ret = tracing_alloc_snapshot_instance(tr);
5427 if (ret < 0)
5428 goto out;
5429 }
5430 #endif
5431
5432 if (t->init) {
5433 ret = tracer_init(t, tr);
5434 if (ret)
5435 goto out;
5436 }
5437
5438 tr->current_trace = t;
5439 tr->current_trace->enabled++;
5440 trace_branch_enable(tr);
5441 out:
5442 mutex_unlock(&trace_types_lock);
5443
5444 return ret;
5445 }
5446
5447 static ssize_t
5448 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
5449 size_t cnt, loff_t *ppos)
5450 {
5451 struct trace_array *tr = filp->private_data;
5452 char buf[MAX_TRACER_SIZE+1];
5453 int i;
5454 size_t ret;
5455 int err;
5456
5457 ret = cnt;
5458
5459 if (cnt > MAX_TRACER_SIZE)
5460 cnt = MAX_TRACER_SIZE;
5461
5462 if (copy_from_user(buf, ubuf, cnt))
5463 return -EFAULT;
5464
5465 buf[cnt] = 0;
5466
5467 /* strip ending whitespace. */
5468 for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
5469 buf[i] = 0;
5470
5471 err = tracing_set_tracer(tr, buf);
5472 if (err)
5473 return err;
5474
5475 *ppos += ret;
5476
5477 return ret;
5478 }
5479
5480 static ssize_t
5481 tracing_nsecs_read(unsigned long *ptr, char __user *ubuf,
5482 size_t cnt, loff_t *ppos)
5483 {
5484 char buf[64];
5485 int r;
5486
5487 r = snprintf(buf, sizeof(buf), "%ld\n",
5488 *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
5489 if (r > sizeof(buf))
5490 r = sizeof(buf);
5491 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
5492 }
5493
5494 static ssize_t
5495 tracing_nsecs_write(unsigned long *ptr, const char __user *ubuf,
5496 size_t cnt, loff_t *ppos)
5497 {
5498 unsigned long val;
5499 int ret;
5500
5501 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
5502 if (ret)
5503 return ret;
5504
5505 *ptr = val * 1000;
5506
5507 return cnt;
5508 }
5509
5510 static ssize_t
5511 tracing_thresh_read(struct file *filp, char __user *ubuf,
5512 size_t cnt, loff_t *ppos)
5513 {
5514 return tracing_nsecs_read(&tracing_thresh, ubuf, cnt, ppos);
5515 }
5516
5517 static ssize_t
5518 tracing_thresh_write(struct file *filp, const char __user *ubuf,
5519 size_t cnt, loff_t *ppos)
5520 {
5521 struct trace_array *tr = filp->private_data;
5522 int ret;
5523
5524 mutex_lock(&trace_types_lock);
5525 ret = tracing_nsecs_write(&tracing_thresh, ubuf, cnt, ppos);
5526 if (ret < 0)
5527 goto out;
5528
5529 if (tr->current_trace->update_thresh) {
5530 ret = tr->current_trace->update_thresh(tr);
5531 if (ret < 0)
5532 goto out;
5533 }
5534
5535 ret = cnt;
5536 out:
5537 mutex_unlock(&trace_types_lock);
5538
5539 return ret;
5540 }
5541
5542 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
5543
5544 static ssize_t
5545 tracing_max_lat_read(struct file *filp, char __user *ubuf,
5546 size_t cnt, loff_t *ppos)
5547 {
5548 return tracing_nsecs_read(filp->private_data, ubuf, cnt, ppos);
5549 }
5550
5551 static ssize_t
5552 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
5553 size_t cnt, loff_t *ppos)
5554 {
5555 return tracing_nsecs_write(filp->private_data, ubuf, cnt, ppos);
5556 }
5557
5558 #endif
5559
5560 static int tracing_open_pipe(struct inode *inode, struct file *filp)
5561 {
5562 struct trace_array *tr = inode->i_private;
5563 struct trace_iterator *iter;
5564 int ret = 0;
5565
5566 if (tracing_disabled)
5567 return -ENODEV;
5568
5569 if (trace_array_get(tr) < 0)
5570 return -ENODEV;
5571
5572 mutex_lock(&trace_types_lock);
5573
5574 /* create a buffer to store the information to pass to userspace */
5575 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
5576 if (!iter) {
5577 ret = -ENOMEM;
5578 __trace_array_put(tr);
5579 goto out;
5580 }
5581
5582 trace_seq_init(&iter->seq);
5583 iter->trace = tr->current_trace;
5584
5585 if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
5586 ret = -ENOMEM;
5587 goto fail;
5588 }
5589
5590 /* trace pipe does not show start of buffer */
5591 cpumask_setall(iter->started);
5592
5593 if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
5594 iter->iter_flags |= TRACE_FILE_LAT_FMT;
5595
5596 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
5597 if (trace_clocks[tr->clock_id].in_ns)
5598 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
5599
5600 iter->tr = tr;
5601 iter->trace_buffer = &tr->trace_buffer;
5602 iter->cpu_file = tracing_get_cpu(inode);
5603 mutex_init(&iter->mutex);
5604 filp->private_data = iter;
5605
5606 if (iter->trace->pipe_open)
5607 iter->trace->pipe_open(iter);
5608
5609 nonseekable_open(inode, filp);
5610
5611 tr->current_trace->ref++;
5612 out:
5613 mutex_unlock(&trace_types_lock);
5614 return ret;
5615
5616 fail:
5617 kfree(iter->trace);
5618 kfree(iter);
5619 __trace_array_put(tr);
5620 mutex_unlock(&trace_types_lock);
5621 return ret;
5622 }
5623
5624 static int tracing_release_pipe(struct inode *inode, struct file *file)
5625 {
5626 struct trace_iterator *iter = file->private_data;
5627 struct trace_array *tr = inode->i_private;
5628
5629 mutex_lock(&trace_types_lock);
5630
5631 tr->current_trace->ref--;
5632
5633 if (iter->trace->pipe_close)
5634 iter->trace->pipe_close(iter);
5635
5636 mutex_unlock(&trace_types_lock);
5637
5638 free_cpumask_var(iter->started);
5639 mutex_destroy(&iter->mutex);
5640 kfree(iter);
5641
5642 trace_array_put(tr);
5643
5644 return 0;
5645 }
5646
5647 static __poll_t
5648 trace_poll(struct trace_iterator *iter, struct file *filp, poll_table *poll_table)
5649 {
5650 struct trace_array *tr = iter->tr;
5651
5652 /* Iterators are static, they should be filled or empty */
5653 if (trace_buffer_iter(iter, iter->cpu_file))
5654 return EPOLLIN | EPOLLRDNORM;
5655
5656 if (tr->trace_flags & TRACE_ITER_BLOCK)
5657 /*
5658 * Always select as readable when in blocking mode
5659 */
5660 return EPOLLIN | EPOLLRDNORM;
5661 else
5662 return ring_buffer_poll_wait(iter->trace_buffer->buffer, iter->cpu_file,
5663 filp, poll_table);
5664 }
5665
5666 static __poll_t
5667 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
5668 {
5669 struct trace_iterator *iter = filp->private_data;
5670
5671 return trace_poll(iter, filp, poll_table);
5672 }
5673
5674 /* Must be called with iter->mutex held. */
5675 static int tracing_wait_pipe(struct file *filp)
5676 {
5677 struct trace_iterator *iter = filp->private_data;
5678 int ret;
5679
5680 while (trace_empty(iter)) {
5681
5682 if ((filp->f_flags & O_NONBLOCK)) {
5683 return -EAGAIN;
5684 }
5685
5686 /*
5687 * We block until we read something and tracing is disabled.
5688 * We still block if tracing is disabled, but we have never
5689 * read anything. This allows a user to cat this file, and
5690 * then enable tracing. But after we have read something,
5691 * we give an EOF when tracing is again disabled.
5692 *
5693 * iter->pos will be 0 if we haven't read anything.
5694 */
5695 if (!tracer_tracing_is_on(iter->tr) && iter->pos)
5696 break;
5697
5698 mutex_unlock(&iter->mutex);
5699
5700 ret = wait_on_pipe(iter, 0);
5701
5702 mutex_lock(&iter->mutex);
5703
5704 if (ret)
5705 return ret;
5706 }
5707
5708 return 1;
5709 }
5710
5711 /*
5712 * Consumer reader.
5713 */
5714 static ssize_t
5715 tracing_read_pipe(struct file *filp, char __user *ubuf,
5716 size_t cnt, loff_t *ppos)
5717 {
5718 struct trace_iterator *iter = filp->private_data;
5719 ssize_t sret;
5720
5721 /*
5722 * Avoid more than one consumer on a single file descriptor
5723 * This is just a matter of traces coherency, the ring buffer itself
5724 * is protected.
5725 */
5726 mutex_lock(&iter->mutex);
5727
5728 /* return any leftover data */
5729 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5730 if (sret != -EBUSY)
5731 goto out;
5732
5733 trace_seq_init(&iter->seq);
5734
5735 if (iter->trace->read) {
5736 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
5737 if (sret)
5738 goto out;
5739 }
5740
5741 waitagain:
5742 sret = tracing_wait_pipe(filp);
5743 if (sret <= 0)
5744 goto out;
5745
5746 /* stop when tracing is finished */
5747 if (trace_empty(iter)) {
5748 sret = 0;
5749 goto out;
5750 }
5751
5752 if (cnt >= PAGE_SIZE)
5753 cnt = PAGE_SIZE - 1;
5754
5755 /* reset all but tr, trace, and overruns */
5756 memset(&iter->seq, 0,
5757 sizeof(struct trace_iterator) -
5758 offsetof(struct trace_iterator, seq));
5759 cpumask_clear(iter->started);
5760 iter->pos = -1;
5761
5762 trace_event_read_lock();
5763 trace_access_lock(iter->cpu_file);
5764 while (trace_find_next_entry_inc(iter) != NULL) {
5765 enum print_line_t ret;
5766 int save_len = iter->seq.seq.len;
5767
5768 ret = print_trace_line(iter);
5769 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5770 /* don't print partial lines */
5771 iter->seq.seq.len = save_len;
5772 break;
5773 }
5774 if (ret != TRACE_TYPE_NO_CONSUME)
5775 trace_consume(iter);
5776
5777 if (trace_seq_used(&iter->seq) >= cnt)
5778 break;
5779
5780 /*
5781 * Setting the full flag means we reached the trace_seq buffer
5782 * size and we should leave by partial output condition above.
5783 * One of the trace_seq_* functions is not used properly.
5784 */
5785 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
5786 iter->ent->type);
5787 }
5788 trace_access_unlock(iter->cpu_file);
5789 trace_event_read_unlock();
5790
5791 /* Now copy what we have to the user */
5792 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
5793 if (iter->seq.seq.readpos >= trace_seq_used(&iter->seq))
5794 trace_seq_init(&iter->seq);
5795
5796 /*
5797 * If there was nothing to send to user, in spite of consuming trace
5798 * entries, go back to wait for more entries.
5799 */
5800 if (sret == -EBUSY)
5801 goto waitagain;
5802
5803 out:
5804 mutex_unlock(&iter->mutex);
5805
5806 return sret;
5807 }
5808
5809 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
5810 unsigned int idx)
5811 {
5812 __free_page(spd->pages[idx]);
5813 }
5814
5815 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
5816 .can_merge = 0,
5817 .confirm = generic_pipe_buf_confirm,
5818 .release = generic_pipe_buf_release,
5819 .steal = generic_pipe_buf_steal,
5820 .get = generic_pipe_buf_get,
5821 };
5822
5823 static size_t
5824 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
5825 {
5826 size_t count;
5827 int save_len;
5828 int ret;
5829
5830 /* Seq buffer is page-sized, exactly what we need. */
5831 for (;;) {
5832 save_len = iter->seq.seq.len;
5833 ret = print_trace_line(iter);
5834
5835 if (trace_seq_has_overflowed(&iter->seq)) {
5836 iter->seq.seq.len = save_len;
5837 break;
5838 }
5839
5840 /*
5841 * This should not be hit, because it should only
5842 * be set if the iter->seq overflowed. But check it
5843 * anyway to be safe.
5844 */
5845 if (ret == TRACE_TYPE_PARTIAL_LINE) {
5846 iter->seq.seq.len = save_len;
5847 break;
5848 }
5849
5850 count = trace_seq_used(&iter->seq) - save_len;
5851 if (rem < count) {
5852 rem = 0;
5853 iter->seq.seq.len = save_len;
5854 break;
5855 }
5856
5857 if (ret != TRACE_TYPE_NO_CONSUME)
5858 trace_consume(iter);
5859 rem -= count;
5860 if (!trace_find_next_entry_inc(iter)) {
5861 rem = 0;
5862 iter->ent = NULL;
5863 break;
5864 }
5865 }
5866
5867 return rem;
5868 }
5869
5870 static ssize_t tracing_splice_read_pipe(struct file *filp,
5871 loff_t *ppos,
5872 struct pipe_inode_info *pipe,
5873 size_t len,
5874 unsigned int flags)
5875 {
5876 struct page *pages_def[PIPE_DEF_BUFFERS];
5877 struct partial_page partial_def[PIPE_DEF_BUFFERS];
5878 struct trace_iterator *iter = filp->private_data;
5879 struct splice_pipe_desc spd = {
5880 .pages = pages_def,
5881 .partial = partial_def,
5882 .nr_pages = 0, /* This gets updated below. */
5883 .nr_pages_max = PIPE_DEF_BUFFERS,
5884 .ops = &tracing_pipe_buf_ops,
5885 .spd_release = tracing_spd_release_pipe,
5886 };
5887 ssize_t ret;
5888 size_t rem;
5889 unsigned int i;
5890
5891 if (splice_grow_spd(pipe, &spd))
5892 return -ENOMEM;
5893
5894 mutex_lock(&iter->mutex);
5895
5896 if (iter->trace->splice_read) {
5897 ret = iter->trace->splice_read(iter, filp,
5898 ppos, pipe, len, flags);
5899 if (ret)
5900 goto out_err;
5901 }
5902
5903 ret = tracing_wait_pipe(filp);
5904 if (ret <= 0)
5905 goto out_err;
5906
5907 if (!iter->ent && !trace_find_next_entry_inc(iter)) {
5908 ret = -EFAULT;
5909 goto out_err;
5910 }
5911
5912 trace_event_read_lock();
5913 trace_access_lock(iter->cpu_file);
5914
5915 /* Fill as many pages as possible. */
5916 for (i = 0, rem = len; i < spd.nr_pages_max && rem; i++) {
5917 spd.pages[i] = alloc_page(GFP_KERNEL);
5918 if (!spd.pages[i])
5919 break;
5920
5921 rem = tracing_fill_pipe_page(rem, iter);
5922
5923 /* Copy the data into the page, so we can start over. */
5924 ret = trace_seq_to_buffer(&iter->seq,
5925 page_address(spd.pages[i]),
5926 trace_seq_used(&iter->seq));
5927 if (ret < 0) {
5928 __free_page(spd.pages[i]);
5929 break;
5930 }
5931 spd.partial[i].offset = 0;
5932 spd.partial[i].len = trace_seq_used(&iter->seq);
5933
5934 trace_seq_init(&iter->seq);
5935 }
5936
5937 trace_access_unlock(iter->cpu_file);
5938 trace_event_read_unlock();
5939 mutex_unlock(&iter->mutex);
5940
5941 spd.nr_pages = i;
5942
5943 if (i)
5944 ret = splice_to_pipe(pipe, &spd);
5945 else
5946 ret = 0;
5947 out:
5948 splice_shrink_spd(&spd);
5949 return ret;
5950
5951 out_err:
5952 mutex_unlock(&iter->mutex);
5953 goto out;
5954 }
5955
5956 static ssize_t
5957 tracing_entries_read(struct file *filp, char __user *ubuf,
5958 size_t cnt, loff_t *ppos)
5959 {
5960 struct inode *inode = file_inode(filp);
5961 struct trace_array *tr = inode->i_private;
5962 int cpu = tracing_get_cpu(inode);
5963 char buf[64];
5964 int r = 0;
5965 ssize_t ret;
5966
5967 mutex_lock(&trace_types_lock);
5968
5969 if (cpu == RING_BUFFER_ALL_CPUS) {
5970 int cpu, buf_size_same;
5971 unsigned long size;
5972
5973 size = 0;
5974 buf_size_same = 1;
5975 /* check if all cpu sizes are same */
5976 for_each_tracing_cpu(cpu) {
5977 /* fill in the size from first enabled cpu */
5978 if (size == 0)
5979 size = per_cpu_ptr(tr->trace_buffer.data, cpu)->entries;
5980 if (size != per_cpu_ptr(tr->trace_buffer.data, cpu)->entries) {
5981 buf_size_same = 0;
5982 break;
5983 }
5984 }
5985
5986 if (buf_size_same) {
5987 if (!ring_buffer_expanded)
5988 r = sprintf(buf, "%lu (expanded: %lu)\n",
5989 size >> 10,
5990 trace_buf_size >> 10);
5991 else
5992 r = sprintf(buf, "%lu\n", size >> 10);
5993 } else
5994 r = sprintf(buf, "X\n");
5995 } else
5996 r = sprintf(buf, "%lu\n", per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10);
5997
5998 mutex_unlock(&trace_types_lock);
5999
6000 ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6001 return ret;
6002 }
6003
6004 static ssize_t
6005 tracing_entries_write(struct file *filp, const char __user *ubuf,
6006 size_t cnt, loff_t *ppos)
6007 {
6008 struct inode *inode = file_inode(filp);
6009 struct trace_array *tr = inode->i_private;
6010 unsigned long val;
6011 int ret;
6012
6013 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6014 if (ret)
6015 return ret;
6016
6017 /* must have at least 1 entry */
6018 if (!val)
6019 return -EINVAL;
6020
6021 /* value is in KB */
6022 val <<= 10;
6023 ret = tracing_resize_ring_buffer(tr, val, tracing_get_cpu(inode));
6024 if (ret < 0)
6025 return ret;
6026
6027 *ppos += cnt;
6028
6029 return cnt;
6030 }
6031
6032 static ssize_t
6033 tracing_total_entries_read(struct file *filp, char __user *ubuf,
6034 size_t cnt, loff_t *ppos)
6035 {
6036 struct trace_array *tr = filp->private_data;
6037 char buf[64];
6038 int r, cpu;
6039 unsigned long size = 0, expanded_size = 0;
6040
6041 mutex_lock(&trace_types_lock);
6042 for_each_tracing_cpu(cpu) {
6043 size += per_cpu_ptr(tr->trace_buffer.data, cpu)->entries >> 10;
6044 if (!ring_buffer_expanded)
6045 expanded_size += trace_buf_size >> 10;
6046 }
6047 if (ring_buffer_expanded)
6048 r = sprintf(buf, "%lu\n", size);
6049 else
6050 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
6051 mutex_unlock(&trace_types_lock);
6052
6053 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
6054 }
6055
6056 static ssize_t
6057 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
6058 size_t cnt, loff_t *ppos)
6059 {
6060 /*
6061 * There is no need to read what the user has written, this function
6062 * is just to make sure that there is no error when "echo" is used
6063 */
6064
6065 *ppos += cnt;
6066
6067 return cnt;
6068 }
6069
6070 static int
6071 tracing_free_buffer_release(struct inode *inode, struct file *filp)
6072 {
6073 struct trace_array *tr = inode->i_private;
6074
6075 /* disable tracing ? */
6076 if (tr->trace_flags & TRACE_ITER_STOP_ON_FREE)
6077 tracer_tracing_off(tr);
6078 /* resize the ring buffer to 0 */
6079 tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
6080
6081 trace_array_put(tr);
6082
6083 return 0;
6084 }
6085
6086 static ssize_t
6087 tracing_mark_write(struct file *filp, const char __user *ubuf,
6088 size_t cnt, loff_t *fpos)
6089 {
6090 struct trace_array *tr = filp->private_data;
6091 struct ring_buffer_event *event;
6092 enum event_trigger_type tt = ETT_NONE;
6093 struct ring_buffer *buffer;
6094 struct print_entry *entry;
6095 unsigned long irq_flags;
6096 const char faulted[] = "<faulted>";
6097 ssize_t written;
6098 int size;
6099 int len;
6100
6101 /* Used in tracing_mark_raw_write() as well */
6102 #define FAULTED_SIZE (sizeof(faulted) - 1) /* '\0' is already accounted for */
6103
6104 if (tracing_disabled)
6105 return -EINVAL;
6106
6107 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6108 return -EINVAL;
6109
6110 if (cnt > TRACE_BUF_SIZE)
6111 cnt = TRACE_BUF_SIZE;
6112
6113 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6114
6115 local_save_flags(irq_flags);
6116 size = sizeof(*entry) + cnt + 2; /* add '\0' and possible '\n' */
6117
6118 /* If less than "<faulted>", then make sure we can still add that */
6119 if (cnt < FAULTED_SIZE)
6120 size += FAULTED_SIZE - cnt;
6121
6122 buffer = tr->trace_buffer.buffer;
6123 event = __trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
6124 irq_flags, preempt_count());
6125 if (unlikely(!event))
6126 /* Ring buffer disabled, return as if not open for write */
6127 return -EBADF;
6128
6129 entry = ring_buffer_event_data(event);
6130 entry->ip = _THIS_IP_;
6131
6132 len = __copy_from_user_inatomic(&entry->buf, ubuf, cnt);
6133 if (len) {
6134 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6135 cnt = FAULTED_SIZE;
6136 written = -EFAULT;
6137 } else
6138 written = cnt;
6139 len = cnt;
6140
6141 if (tr->trace_marker_file && !list_empty(&tr->trace_marker_file->triggers)) {
6142 /* do not add \n before testing triggers, but add \0 */
6143 entry->buf[cnt] = '\0';
6144 tt = event_triggers_call(tr->trace_marker_file, entry, event);
6145 }
6146
6147 if (entry->buf[cnt - 1] != '\n') {
6148 entry->buf[cnt] = '\n';
6149 entry->buf[cnt + 1] = '\0';
6150 } else
6151 entry->buf[cnt] = '\0';
6152
6153 __buffer_unlock_commit(buffer, event);
6154
6155 if (tt)
6156 event_triggers_post_call(tr->trace_marker_file, tt);
6157
6158 if (written > 0)
6159 *fpos += written;
6160
6161 return written;
6162 }
6163
6164 /* Limit it for now to 3K (including tag) */
6165 #define RAW_DATA_MAX_SIZE (1024*3)
6166
6167 static ssize_t
6168 tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
6169 size_t cnt, loff_t *fpos)
6170 {
6171 struct trace_array *tr = filp->private_data;
6172 struct ring_buffer_event *event;
6173 struct ring_buffer *buffer;
6174 struct raw_data_entry *entry;
6175 const char faulted[] = "<faulted>";
6176 unsigned long irq_flags;
6177 ssize_t written;
6178 int size;
6179 int len;
6180
6181 #define FAULT_SIZE_ID (FAULTED_SIZE + sizeof(int))
6182
6183 if (tracing_disabled)
6184 return -EINVAL;
6185
6186 if (!(tr->trace_flags & TRACE_ITER_MARKERS))
6187 return -EINVAL;
6188
6189 /* The marker must at least have a tag id */
6190 if (cnt < sizeof(unsigned int) || cnt > RAW_DATA_MAX_SIZE)
6191 return -EINVAL;
6192
6193 if (cnt > TRACE_BUF_SIZE)
6194 cnt = TRACE_BUF_SIZE;
6195
6196 BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
6197
6198 local_save_flags(irq_flags);
6199 size = sizeof(*entry) + cnt;
6200 if (cnt < FAULT_SIZE_ID)
6201 size += FAULT_SIZE_ID - cnt;
6202
6203 buffer = tr->trace_buffer.buffer;
6204 event = __trace_buffer_lock_reserve(buffer, TRACE_RAW_DATA, size,
6205 irq_flags, preempt_count());
6206 if (!event)
6207 /* Ring buffer disabled, return as if not open for write */
6208 return -EBADF;
6209
6210 entry = ring_buffer_event_data(event);
6211
6212 len = __copy_from_user_inatomic(&entry->id, ubuf, cnt);
6213 if (len) {
6214 entry->id = -1;
6215 memcpy(&entry->buf, faulted, FAULTED_SIZE);
6216 written = -EFAULT;
6217 } else
6218 written = cnt;
6219
6220 __buffer_unlock_commit(buffer, event);
6221
6222 if (written > 0)
6223 *fpos += written;
6224
6225 return written;
6226 }
6227
6228 static int tracing_clock_show(struct seq_file *m, void *v)
6229 {
6230 struct trace_array *tr = m->private;
6231 int i;
6232
6233 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
6234 seq_printf(m,
6235 "%s%s%s%s", i ? " " : "",
6236 i == tr->clock_id ? "[" : "", trace_clocks[i].name,
6237 i == tr->clock_id ? "]" : "");
6238 seq_putc(m, '\n');
6239
6240 return 0;
6241 }
6242
6243 int tracing_set_clock(struct trace_array *tr, const char *clockstr)
6244 {
6245 int i;
6246
6247 for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
6248 if (strcmp(trace_clocks[i].name, clockstr) == 0)
6249 break;
6250 }
6251 if (i == ARRAY_SIZE(trace_clocks))
6252 return -EINVAL;
6253
6254 mutex_lock(&trace_types_lock);
6255
6256 tr->clock_id = i;
6257
6258 ring_buffer_set_clock(tr->trace_buffer.buffer, trace_clocks[i].func);
6259
6260 /*
6261 * New clock may not be consistent with the previous clock.
6262 * Reset the buffer so that it doesn't have incomparable timestamps.
6263 */
6264 tracing_reset_online_cpus(&tr->trace_buffer);
6265
6266 #ifdef CONFIG_TRACER_MAX_TRACE
6267 if (tr->max_buffer.buffer)
6268 ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
6269 tracing_reset_online_cpus(&tr->max_buffer);
6270 #endif
6271
6272 mutex_unlock(&trace_types_lock);
6273
6274 return 0;
6275 }
6276
6277 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
6278 size_t cnt, loff_t *fpos)
6279 {
6280 struct seq_file *m = filp->private_data;
6281 struct trace_array *tr = m->private;
6282 char buf[64];
6283 const char *clockstr;
6284 int ret;
6285
6286 if (cnt >= sizeof(buf))
6287 return -EINVAL;
6288
6289 if (copy_from_user(buf, ubuf, cnt))
6290 return -EFAULT;
6291
6292 buf[cnt] = 0;
6293
6294 clockstr = strstrip(buf);
6295
6296 ret = tracing_set_clock(tr, clockstr);
6297 if (ret)
6298 return ret;
6299
6300 *fpos += cnt;
6301
6302 return cnt;
6303 }
6304
6305 static int tracing_clock_open(struct inode *inode, struct file *file)
6306 {
6307 struct trace_array *tr = inode->i_private;
6308 int ret;
6309
6310 if (tracing_disabled)
6311 return -ENODEV;
6312
6313 if (trace_array_get(tr))
6314 return -ENODEV;
6315
6316 ret = single_open(file, tracing_clock_show, inode->i_private);
6317 if (ret < 0)
6318 trace_array_put(tr);
6319
6320 return ret;
6321 }
6322
6323 static int tracing_time_stamp_mode_show(struct seq_file *m, void *v)
6324 {
6325 struct trace_array *tr = m->private;
6326
6327 mutex_lock(&trace_types_lock);
6328
6329 if (ring_buffer_time_stamp_abs(tr->trace_buffer.buffer))
6330 seq_puts(m, "delta [absolute]\n");
6331 else
6332 seq_puts(m, "[delta] absolute\n");
6333
6334 mutex_unlock(&trace_types_lock);
6335
6336 return 0;
6337 }
6338
6339 static int tracing_time_stamp_mode_open(struct inode *inode, struct file *file)
6340 {
6341 struct trace_array *tr = inode->i_private;
6342 int ret;
6343
6344 if (tracing_disabled)
6345 return -ENODEV;
6346
6347 if (trace_array_get(tr))
6348 return -ENODEV;
6349
6350 ret = single_open(file, tracing_time_stamp_mode_show, inode->i_private);
6351 if (ret < 0)
6352 trace_array_put(tr);
6353
6354 return ret;
6355 }
6356
6357 int tracing_set_time_stamp_abs(struct trace_array *tr, bool abs)
6358 {
6359 int ret = 0;
6360
6361 mutex_lock(&trace_types_lock);
6362
6363 if (abs && tr->time_stamp_abs_ref++)
6364 goto out;
6365
6366 if (!abs) {
6367 if (WARN_ON_ONCE(!tr->time_stamp_abs_ref)) {
6368 ret = -EINVAL;
6369 goto out;
6370 }
6371
6372 if (--tr->time_stamp_abs_ref)
6373 goto out;
6374 }
6375
6376 ring_buffer_set_time_stamp_abs(tr->trace_buffer.buffer, abs);
6377
6378 #ifdef CONFIG_TRACER_MAX_TRACE
6379 if (tr->max_buffer.buffer)
6380 ring_buffer_set_time_stamp_abs(tr->max_buffer.buffer, abs);
6381 #endif
6382 out:
6383 mutex_unlock(&trace_types_lock);
6384
6385 return ret;
6386 }
6387
6388 struct ftrace_buffer_info {
6389 struct trace_iterator iter;
6390 void *spare;
6391 unsigned int spare_cpu;
6392 unsigned int read;
6393 };
6394
6395 #ifdef CONFIG_TRACER_SNAPSHOT
6396 static int tracing_snapshot_open(struct inode *inode, struct file *file)
6397 {
6398 struct trace_array *tr = inode->i_private;
6399 struct trace_iterator *iter;
6400 struct seq_file *m;
6401 int ret = 0;
6402
6403 if (trace_array_get(tr) < 0)
6404 return -ENODEV;
6405
6406 if (file->f_mode & FMODE_READ) {
6407 iter = __tracing_open(inode, file, true);
6408 if (IS_ERR(iter))
6409 ret = PTR_ERR(iter);
6410 } else {
6411 /* Writes still need the seq_file to hold the private data */
6412 ret = -ENOMEM;
6413 m = kzalloc(sizeof(*m), GFP_KERNEL);
6414 if (!m)
6415 goto out;
6416 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
6417 if (!iter) {
6418 kfree(m);
6419 goto out;
6420 }
6421 ret = 0;
6422
6423 iter->tr = tr;
6424 iter->trace_buffer = &tr->max_buffer;
6425 iter->cpu_file = tracing_get_cpu(inode);
6426 m->private = iter;
6427 file->private_data = m;
6428 }
6429 out:
6430 if (ret < 0)
6431 trace_array_put(tr);
6432
6433 return ret;
6434 }
6435
6436 static ssize_t
6437 tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
6438 loff_t *ppos)
6439 {
6440 struct seq_file *m = filp->private_data;
6441 struct trace_iterator *iter = m->private;
6442 struct trace_array *tr = iter->tr;
6443 unsigned long val;
6444 int ret;
6445
6446 ret = tracing_update_buffers();
6447 if (ret < 0)
6448 return ret;
6449
6450 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
6451 if (ret)
6452 return ret;
6453
6454 mutex_lock(&trace_types_lock);
6455
6456 if (tr->current_trace->use_max_tr) {
6457 ret = -EBUSY;
6458 goto out;
6459 }
6460
6461 switch (val) {
6462 case 0:
6463 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6464 ret = -EINVAL;
6465 break;
6466 }
6467 if (tr->allocated_snapshot)
6468 free_snapshot(tr);
6469 break;
6470 case 1:
6471 /* Only allow per-cpu swap if the ring buffer supports it */
6472 #ifndef CONFIG_RING_BUFFER_ALLOW_SWAP
6473 if (iter->cpu_file != RING_BUFFER_ALL_CPUS) {
6474 ret = -EINVAL;
6475 break;
6476 }
6477 #endif
6478 if (!tr->allocated_snapshot) {
6479 ret = tracing_alloc_snapshot_instance(tr);
6480 if (ret < 0)
6481 break;
6482 }
6483 local_irq_disable();
6484 /* Now, we're going to swap */
6485 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6486 update_max_tr(tr, current, smp_processor_id());
6487 else
6488 update_max_tr_single(tr, current, iter->cpu_file);
6489 local_irq_enable();
6490 break;
6491 default:
6492 if (tr->allocated_snapshot) {
6493 if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
6494 tracing_reset_online_cpus(&tr->max_buffer);
6495 else
6496 tracing_reset(&tr->max_buffer, iter->cpu_file);
6497 }
6498 break;
6499 }
6500
6501 if (ret >= 0) {
6502 *ppos += cnt;
6503 ret = cnt;
6504 }
6505 out:
6506 mutex_unlock(&trace_types_lock);
6507 return ret;
6508 }
6509
6510 static int tracing_snapshot_release(struct inode *inode, struct file *file)
6511 {
6512 struct seq_file *m = file->private_data;
6513 int ret;
6514
6515 ret = tracing_release(inode, file);
6516
6517 if (file->f_mode & FMODE_READ)
6518 return ret;
6519
6520 /* If write only, the seq_file is just a stub */
6521 if (m)
6522 kfree(m->private);
6523 kfree(m);
6524
6525 return 0;
6526 }
6527
6528 static int tracing_buffers_open(struct inode *inode, struct file *filp);
6529 static ssize_t tracing_buffers_read(struct file *filp, char __user *ubuf,
6530 size_t count, loff_t *ppos);
6531 static int tracing_buffers_release(struct inode *inode, struct file *file);
6532 static ssize_t tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6533 struct pipe_inode_info *pipe, size_t len, unsigned int flags);
6534
6535 static int snapshot_raw_open(struct inode *inode, struct file *filp)
6536 {
6537 struct ftrace_buffer_info *info;
6538 int ret;
6539
6540 ret = tracing_buffers_open(inode, filp);
6541 if (ret < 0)
6542 return ret;
6543
6544 info = filp->private_data;
6545
6546 if (info->iter.trace->use_max_tr) {
6547 tracing_buffers_release(inode, filp);
6548 return -EBUSY;
6549 }
6550
6551 info->iter.snapshot = true;
6552 info->iter.trace_buffer = &info->iter.tr->max_buffer;
6553
6554 return ret;
6555 }
6556
6557 #endif /* CONFIG_TRACER_SNAPSHOT */
6558
6559
6560 static const struct file_operations tracing_thresh_fops = {
6561 .open = tracing_open_generic,
6562 .read = tracing_thresh_read,
6563 .write = tracing_thresh_write,
6564 .llseek = generic_file_llseek,
6565 };
6566
6567 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
6568 static const struct file_operations tracing_max_lat_fops = {
6569 .open = tracing_open_generic,
6570 .read = tracing_max_lat_read,
6571 .write = tracing_max_lat_write,
6572 .llseek = generic_file_llseek,
6573 };
6574 #endif
6575
6576 static const struct file_operations set_tracer_fops = {
6577 .open = tracing_open_generic,
6578 .read = tracing_set_trace_read,
6579 .write = tracing_set_trace_write,
6580 .llseek = generic_file_llseek,
6581 };
6582
6583 static const struct file_operations tracing_pipe_fops = {
6584 .open = tracing_open_pipe,
6585 .poll = tracing_poll_pipe,
6586 .read = tracing_read_pipe,
6587 .splice_read = tracing_splice_read_pipe,
6588 .release = tracing_release_pipe,
6589 .llseek = no_llseek,
6590 };
6591
6592 static const struct file_operations tracing_entries_fops = {
6593 .open = tracing_open_generic_tr,
6594 .read = tracing_entries_read,
6595 .write = tracing_entries_write,
6596 .llseek = generic_file_llseek,
6597 .release = tracing_release_generic_tr,
6598 };
6599
6600 static const struct file_operations tracing_total_entries_fops = {
6601 .open = tracing_open_generic_tr,
6602 .read = tracing_total_entries_read,
6603 .llseek = generic_file_llseek,
6604 .release = tracing_release_generic_tr,
6605 };
6606
6607 static const struct file_operations tracing_free_buffer_fops = {
6608 .open = tracing_open_generic_tr,
6609 .write = tracing_free_buffer_write,
6610 .release = tracing_free_buffer_release,
6611 };
6612
6613 static const struct file_operations tracing_mark_fops = {
6614 .open = tracing_open_generic_tr,
6615 .write = tracing_mark_write,
6616 .llseek = generic_file_llseek,
6617 .release = tracing_release_generic_tr,
6618 };
6619
6620 static const struct file_operations tracing_mark_raw_fops = {
6621 .open = tracing_open_generic_tr,
6622 .write = tracing_mark_raw_write,
6623 .llseek = generic_file_llseek,
6624 .release = tracing_release_generic_tr,
6625 };
6626
6627 static const struct file_operations trace_clock_fops = {
6628 .open = tracing_clock_open,
6629 .read = seq_read,
6630 .llseek = seq_lseek,
6631 .release = tracing_single_release_tr,
6632 .write = tracing_clock_write,
6633 };
6634
6635 static const struct file_operations trace_time_stamp_mode_fops = {
6636 .open = tracing_time_stamp_mode_open,
6637 .read = seq_read,
6638 .llseek = seq_lseek,
6639 .release = tracing_single_release_tr,
6640 };
6641
6642 #ifdef CONFIG_TRACER_SNAPSHOT
6643 static const struct file_operations snapshot_fops = {
6644 .open = tracing_snapshot_open,
6645 .read = seq_read,
6646 .write = tracing_snapshot_write,
6647 .llseek = tracing_lseek,
6648 .release = tracing_snapshot_release,
6649 };
6650
6651 static const struct file_operations snapshot_raw_fops = {
6652 .open = snapshot_raw_open,
6653 .read = tracing_buffers_read,
6654 .release = tracing_buffers_release,
6655 .splice_read = tracing_buffers_splice_read,
6656 .llseek = no_llseek,
6657 };
6658
6659 #endif /* CONFIG_TRACER_SNAPSHOT */
6660
6661 static int tracing_buffers_open(struct inode *inode, struct file *filp)
6662 {
6663 struct trace_array *tr = inode->i_private;
6664 struct ftrace_buffer_info *info;
6665 int ret;
6666
6667 if (tracing_disabled)
6668 return -ENODEV;
6669
6670 if (trace_array_get(tr) < 0)
6671 return -ENODEV;
6672
6673 info = kzalloc(sizeof(*info), GFP_KERNEL);
6674 if (!info) {
6675 trace_array_put(tr);
6676 return -ENOMEM;
6677 }
6678
6679 mutex_lock(&trace_types_lock);
6680
6681 info->iter.tr = tr;
6682 info->iter.cpu_file = tracing_get_cpu(inode);
6683 info->iter.trace = tr->current_trace;
6684 info->iter.trace_buffer = &tr->trace_buffer;
6685 info->spare = NULL;
6686 /* Force reading ring buffer for first read */
6687 info->read = (unsigned int)-1;
6688
6689 filp->private_data = info;
6690
6691 tr->current_trace->ref++;
6692
6693 mutex_unlock(&trace_types_lock);
6694
6695 ret = nonseekable_open(inode, filp);
6696 if (ret < 0)
6697 trace_array_put(tr);
6698
6699 return ret;
6700 }
6701
6702 static __poll_t
6703 tracing_buffers_poll(struct file *filp, poll_table *poll_table)
6704 {
6705 struct ftrace_buffer_info *info = filp->private_data;
6706 struct trace_iterator *iter = &info->iter;
6707
6708 return trace_poll(iter, filp, poll_table);
6709 }
6710
6711 static ssize_t
6712 tracing_buffers_read(struct file *filp, char __user *ubuf,
6713 size_t count, loff_t *ppos)
6714 {
6715 struct ftrace_buffer_info *info = filp->private_data;
6716 struct trace_iterator *iter = &info->iter;
6717 ssize_t ret = 0;
6718 ssize_t size;
6719
6720 if (!count)
6721 return 0;
6722
6723 #ifdef CONFIG_TRACER_MAX_TRACE
6724 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6725 return -EBUSY;
6726 #endif
6727
6728 if (!info->spare) {
6729 info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
6730 iter->cpu_file);
6731 if (IS_ERR(info->spare)) {
6732 ret = PTR_ERR(info->spare);
6733 info->spare = NULL;
6734 } else {
6735 info->spare_cpu = iter->cpu_file;
6736 }
6737 }
6738 if (!info->spare)
6739 return ret;
6740
6741 /* Do we have previous read data to read? */
6742 if (info->read < PAGE_SIZE)
6743 goto read;
6744
6745 again:
6746 trace_access_lock(iter->cpu_file);
6747 ret = ring_buffer_read_page(iter->trace_buffer->buffer,
6748 &info->spare,
6749 count,
6750 iter->cpu_file, 0);
6751 trace_access_unlock(iter->cpu_file);
6752
6753 if (ret < 0) {
6754 if (trace_empty(iter)) {
6755 if ((filp->f_flags & O_NONBLOCK))
6756 return -EAGAIN;
6757
6758 ret = wait_on_pipe(iter, 0);
6759 if (ret)
6760 return ret;
6761
6762 goto again;
6763 }
6764 return 0;
6765 }
6766
6767 info->read = 0;
6768 read:
6769 size = PAGE_SIZE - info->read;
6770 if (size > count)
6771 size = count;
6772
6773 ret = copy_to_user(ubuf, info->spare + info->read, size);
6774 if (ret == size)
6775 return -EFAULT;
6776
6777 size -= ret;
6778
6779 *ppos += size;
6780 info->read += size;
6781
6782 return size;
6783 }
6784
6785 static int tracing_buffers_release(struct inode *inode, struct file *file)
6786 {
6787 struct ftrace_buffer_info *info = file->private_data;
6788 struct trace_iterator *iter = &info->iter;
6789
6790 mutex_lock(&trace_types_lock);
6791
6792 iter->tr->current_trace->ref--;
6793
6794 __trace_array_put(iter->tr);
6795
6796 if (info->spare)
6797 ring_buffer_free_read_page(iter->trace_buffer->buffer,
6798 info->spare_cpu, info->spare);
6799 kfree(info);
6800
6801 mutex_unlock(&trace_types_lock);
6802
6803 return 0;
6804 }
6805
6806 struct buffer_ref {
6807 struct ring_buffer *buffer;
6808 void *page;
6809 int cpu;
6810 int ref;
6811 };
6812
6813 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
6814 struct pipe_buffer *buf)
6815 {
6816 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6817
6818 if (--ref->ref)
6819 return;
6820
6821 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6822 kfree(ref);
6823 buf->private = 0;
6824 }
6825
6826 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
6827 struct pipe_buffer *buf)
6828 {
6829 struct buffer_ref *ref = (struct buffer_ref *)buf->private;
6830
6831 ref->ref++;
6832 }
6833
6834 /* Pipe buffer operations for a buffer. */
6835 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
6836 .can_merge = 0,
6837 .confirm = generic_pipe_buf_confirm,
6838 .release = buffer_pipe_buf_release,
6839 .steal = generic_pipe_buf_steal,
6840 .get = buffer_pipe_buf_get,
6841 };
6842
6843 /*
6844 * Callback from splice_to_pipe(), if we need to release some pages
6845 * at the end of the spd in case we error'ed out in filling the pipe.
6846 */
6847 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
6848 {
6849 struct buffer_ref *ref =
6850 (struct buffer_ref *)spd->partial[i].private;
6851
6852 if (--ref->ref)
6853 return;
6854
6855 ring_buffer_free_read_page(ref->buffer, ref->cpu, ref->page);
6856 kfree(ref);
6857 spd->partial[i].private = 0;
6858 }
6859
6860 static ssize_t
6861 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
6862 struct pipe_inode_info *pipe, size_t len,
6863 unsigned int flags)
6864 {
6865 struct ftrace_buffer_info *info = file->private_data;
6866 struct trace_iterator *iter = &info->iter;
6867 struct partial_page partial_def[PIPE_DEF_BUFFERS];
6868 struct page *pages_def[PIPE_DEF_BUFFERS];
6869 struct splice_pipe_desc spd = {
6870 .pages = pages_def,
6871 .partial = partial_def,
6872 .nr_pages_max = PIPE_DEF_BUFFERS,
6873 .ops = &buffer_pipe_buf_ops,
6874 .spd_release = buffer_spd_release,
6875 };
6876 struct buffer_ref *ref;
6877 int entries, i;
6878 ssize_t ret = 0;
6879
6880 #ifdef CONFIG_TRACER_MAX_TRACE
6881 if (iter->snapshot && iter->tr->current_trace->use_max_tr)
6882 return -EBUSY;
6883 #endif
6884
6885 if (*ppos & (PAGE_SIZE - 1))
6886 return -EINVAL;
6887
6888 if (len & (PAGE_SIZE - 1)) {
6889 if (len < PAGE_SIZE)
6890 return -EINVAL;
6891 len &= PAGE_MASK;
6892 }
6893
6894 if (splice_grow_spd(pipe, &spd))
6895 return -ENOMEM;
6896
6897 again:
6898 trace_access_lock(iter->cpu_file);
6899 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6900
6901 for (i = 0; i < spd.nr_pages_max && len && entries; i++, len -= PAGE_SIZE) {
6902 struct page *page;
6903 int r;
6904
6905 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
6906 if (!ref) {
6907 ret = -ENOMEM;
6908 break;
6909 }
6910
6911 ref->ref = 1;
6912 ref->buffer = iter->trace_buffer->buffer;
6913 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
6914 if (IS_ERR(ref->page)) {
6915 ret = PTR_ERR(ref->page);
6916 ref->page = NULL;
6917 kfree(ref);
6918 break;
6919 }
6920 ref->cpu = iter->cpu_file;
6921
6922 r = ring_buffer_read_page(ref->buffer, &ref->page,
6923 len, iter->cpu_file, 1);
6924 if (r < 0) {
6925 ring_buffer_free_read_page(ref->buffer, ref->cpu,
6926 ref->page);
6927 kfree(ref);
6928 break;
6929 }
6930
6931 page = virt_to_page(ref->page);
6932
6933 spd.pages[i] = page;
6934 spd.partial[i].len = PAGE_SIZE;
6935 spd.partial[i].offset = 0;
6936 spd.partial[i].private = (unsigned long)ref;
6937 spd.nr_pages++;
6938 *ppos += PAGE_SIZE;
6939
6940 entries = ring_buffer_entries_cpu(iter->trace_buffer->buffer, iter->cpu_file);
6941 }
6942
6943 trace_access_unlock(iter->cpu_file);
6944 spd.nr_pages = i;
6945
6946 /* did we read anything? */
6947 if (!spd.nr_pages) {
6948 if (ret)
6949 goto out;
6950
6951 ret = -EAGAIN;
6952 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK))
6953 goto out;
6954
6955 ret = wait_on_pipe(iter, iter->tr->buffer_percent);
6956 if (ret)
6957 goto out;
6958
6959 goto again;
6960 }
6961
6962 ret = splice_to_pipe(pipe, &spd);
6963 out:
6964 splice_shrink_spd(&spd);
6965
6966 return ret;
6967 }
6968
6969 static const struct file_operations tracing_buffers_fops = {
6970 .open = tracing_buffers_open,
6971 .read = tracing_buffers_read,
6972 .poll = tracing_buffers_poll,
6973 .release = tracing_buffers_release,
6974 .splice_read = tracing_buffers_splice_read,
6975 .llseek = no_llseek,
6976 };
6977
6978 static ssize_t
6979 tracing_stats_read(struct file *filp, char __user *ubuf,
6980 size_t count, loff_t *ppos)
6981 {
6982 struct inode *inode = file_inode(filp);
6983 struct trace_array *tr = inode->i_private;
6984 struct trace_buffer *trace_buf = &tr->trace_buffer;
6985 int cpu = tracing_get_cpu(inode);
6986 struct trace_seq *s;
6987 unsigned long cnt;
6988 unsigned long long t;
6989 unsigned long usec_rem;
6990
6991 s = kmalloc(sizeof(*s), GFP_KERNEL);
6992 if (!s)
6993 return -ENOMEM;
6994
6995 trace_seq_init(s);
6996
6997 cnt = ring_buffer_entries_cpu(trace_buf->buffer, cpu);
6998 trace_seq_printf(s, "entries: %ld\n", cnt);
6999
7000 cnt = ring_buffer_overrun_cpu(trace_buf->buffer, cpu);
7001 trace_seq_printf(s, "overrun: %ld\n", cnt);
7002
7003 cnt = ring_buffer_commit_overrun_cpu(trace_buf->buffer, cpu);
7004 trace_seq_printf(s, "commit overrun: %ld\n", cnt);
7005
7006 cnt = ring_buffer_bytes_cpu(trace_buf->buffer, cpu);
7007 trace_seq_printf(s, "bytes: %ld\n", cnt);
7008
7009 if (trace_clocks[tr->clock_id].in_ns) {
7010 /* local or global for trace_clock */
7011 t = ns2usecs(ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7012 usec_rem = do_div(t, USEC_PER_SEC);
7013 trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n",
7014 t, usec_rem);
7015
7016 t = ns2usecs(ring_buffer_time_stamp(trace_buf->buffer, cpu));
7017 usec_rem = do_div(t, USEC_PER_SEC);
7018 trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
7019 } else {
7020 /* counter or tsc mode for trace_clock */
7021 trace_seq_printf(s, "oldest event ts: %llu\n",
7022 ring_buffer_oldest_event_ts(trace_buf->buffer, cpu));
7023
7024 trace_seq_printf(s, "now ts: %llu\n",
7025 ring_buffer_time_stamp(trace_buf->buffer, cpu));
7026 }
7027
7028 cnt = ring_buffer_dropped_events_cpu(trace_buf->buffer, cpu);
7029 trace_seq_printf(s, "dropped events: %ld\n", cnt);
7030
7031 cnt = ring_buffer_read_events_cpu(trace_buf->buffer, cpu);
7032 trace_seq_printf(s, "read events: %ld\n", cnt);
7033
7034 count = simple_read_from_buffer(ubuf, count, ppos,
7035 s->buffer, trace_seq_used(s));
7036
7037 kfree(s);
7038
7039 return count;
7040 }
7041
7042 static const struct file_operations tracing_stats_fops = {
7043 .open = tracing_open_generic_tr,
7044 .read = tracing_stats_read,
7045 .llseek = generic_file_llseek,
7046 .release = tracing_release_generic_tr,
7047 };
7048
7049 #ifdef CONFIG_DYNAMIC_FTRACE
7050
7051 static ssize_t
7052 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
7053 size_t cnt, loff_t *ppos)
7054 {
7055 unsigned long *p = filp->private_data;
7056 char buf[64]; /* Not too big for a shallow stack */
7057 int r;
7058
7059 r = scnprintf(buf, 63, "%ld", *p);
7060 buf[r++] = '\n';
7061
7062 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7063 }
7064
7065 static const struct file_operations tracing_dyn_info_fops = {
7066 .open = tracing_open_generic,
7067 .read = tracing_read_dyn_info,
7068 .llseek = generic_file_llseek,
7069 };
7070 #endif /* CONFIG_DYNAMIC_FTRACE */
7071
7072 #if defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE)
7073 static void
7074 ftrace_snapshot(unsigned long ip, unsigned long parent_ip,
7075 struct trace_array *tr, struct ftrace_probe_ops *ops,
7076 void *data)
7077 {
7078 tracing_snapshot_instance(tr);
7079 }
7080
7081 static void
7082 ftrace_count_snapshot(unsigned long ip, unsigned long parent_ip,
7083 struct trace_array *tr, struct ftrace_probe_ops *ops,
7084 void *data)
7085 {
7086 struct ftrace_func_mapper *mapper = data;
7087 long *count = NULL;
7088
7089 if (mapper)
7090 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7091
7092 if (count) {
7093
7094 if (*count <= 0)
7095 return;
7096
7097 (*count)--;
7098 }
7099
7100 tracing_snapshot_instance(tr);
7101 }
7102
7103 static int
7104 ftrace_snapshot_print(struct seq_file *m, unsigned long ip,
7105 struct ftrace_probe_ops *ops, void *data)
7106 {
7107 struct ftrace_func_mapper *mapper = data;
7108 long *count = NULL;
7109
7110 seq_printf(m, "%ps:", (void *)ip);
7111
7112 seq_puts(m, "snapshot");
7113
7114 if (mapper)
7115 count = (long *)ftrace_func_mapper_find_ip(mapper, ip);
7116
7117 if (count)
7118 seq_printf(m, ":count=%ld\n", *count);
7119 else
7120 seq_puts(m, ":unlimited\n");
7121
7122 return 0;
7123 }
7124
7125 static int
7126 ftrace_snapshot_init(struct ftrace_probe_ops *ops, struct trace_array *tr,
7127 unsigned long ip, void *init_data, void **data)
7128 {
7129 struct ftrace_func_mapper *mapper = *data;
7130
7131 if (!mapper) {
7132 mapper = allocate_ftrace_func_mapper();
7133 if (!mapper)
7134 return -ENOMEM;
7135 *data = mapper;
7136 }
7137
7138 return ftrace_func_mapper_add_ip(mapper, ip, init_data);
7139 }
7140
7141 static void
7142 ftrace_snapshot_free(struct ftrace_probe_ops *ops, struct trace_array *tr,
7143 unsigned long ip, void *data)
7144 {
7145 struct ftrace_func_mapper *mapper = data;
7146
7147 if (!ip) {
7148 if (!mapper)
7149 return;
7150 free_ftrace_func_mapper(mapper, NULL);
7151 return;
7152 }
7153
7154 ftrace_func_mapper_remove_ip(mapper, ip);
7155 }
7156
7157 static struct ftrace_probe_ops snapshot_probe_ops = {
7158 .func = ftrace_snapshot,
7159 .print = ftrace_snapshot_print,
7160 };
7161
7162 static struct ftrace_probe_ops snapshot_count_probe_ops = {
7163 .func = ftrace_count_snapshot,
7164 .print = ftrace_snapshot_print,
7165 .init = ftrace_snapshot_init,
7166 .free = ftrace_snapshot_free,
7167 };
7168
7169 static int
7170 ftrace_trace_snapshot_callback(struct trace_array *tr, struct ftrace_hash *hash,
7171 char *glob, char *cmd, char *param, int enable)
7172 {
7173 struct ftrace_probe_ops *ops;
7174 void *count = (void *)-1;
7175 char *number;
7176 int ret;
7177
7178 if (!tr)
7179 return -ENODEV;
7180
7181 /* hash funcs only work with set_ftrace_filter */
7182 if (!enable)
7183 return -EINVAL;
7184
7185 ops = param ? &snapshot_count_probe_ops : &snapshot_probe_ops;
7186
7187 if (glob[0] == '!')
7188 return unregister_ftrace_function_probe_func(glob+1, tr, ops);
7189
7190 if (!param)
7191 goto out_reg;
7192
7193 number = strsep(&param, ":");
7194
7195 if (!strlen(number))
7196 goto out_reg;
7197
7198 /*
7199 * We use the callback data field (which is a pointer)
7200 * as our counter.
7201 */
7202 ret = kstrtoul(number, 0, (unsigned long *)&count);
7203 if (ret)
7204 return ret;
7205
7206 out_reg:
7207 ret = tracing_alloc_snapshot_instance(tr);
7208 if (ret < 0)
7209 goto out;
7210
7211 ret = register_ftrace_function_probe(glob, tr, ops, count);
7212
7213 out:
7214 return ret < 0 ? ret : 0;
7215 }
7216
7217 static struct ftrace_func_command ftrace_snapshot_cmd = {
7218 .name = "snapshot",
7219 .func = ftrace_trace_snapshot_callback,
7220 };
7221
7222 static __init int register_snapshot_cmd(void)
7223 {
7224 return register_ftrace_command(&ftrace_snapshot_cmd);
7225 }
7226 #else
7227 static inline __init int register_snapshot_cmd(void) { return 0; }
7228 #endif /* defined(CONFIG_TRACER_SNAPSHOT) && defined(CONFIG_DYNAMIC_FTRACE) */
7229
7230 static struct dentry *tracing_get_dentry(struct trace_array *tr)
7231 {
7232 if (WARN_ON(!tr->dir))
7233 return ERR_PTR(-ENODEV);
7234
7235 /* Top directory uses NULL as the parent */
7236 if (tr->flags & TRACE_ARRAY_FL_GLOBAL)
7237 return NULL;
7238
7239 /* All sub buffers have a descriptor */
7240 return tr->dir;
7241 }
7242
7243 static struct dentry *tracing_dentry_percpu(struct trace_array *tr, int cpu)
7244 {
7245 struct dentry *d_tracer;
7246
7247 if (tr->percpu_dir)
7248 return tr->percpu_dir;
7249
7250 d_tracer = tracing_get_dentry(tr);
7251 if (IS_ERR(d_tracer))
7252 return NULL;
7253
7254 tr->percpu_dir = tracefs_create_dir("per_cpu", d_tracer);
7255
7256 WARN_ONCE(!tr->percpu_dir,
7257 "Could not create tracefs directory 'per_cpu/%d'\n", cpu);
7258
7259 return tr->percpu_dir;
7260 }
7261
7262 static struct dentry *
7263 trace_create_cpu_file(const char *name, umode_t mode, struct dentry *parent,
7264 void *data, long cpu, const struct file_operations *fops)
7265 {
7266 struct dentry *ret = trace_create_file(name, mode, parent, data, fops);
7267
7268 if (ret) /* See tracing_get_cpu() */
7269 d_inode(ret)->i_cdev = (void *)(cpu + 1);
7270 return ret;
7271 }
7272
7273 static void
7274 tracing_init_tracefs_percpu(struct trace_array *tr, long cpu)
7275 {
7276 struct dentry *d_percpu = tracing_dentry_percpu(tr, cpu);
7277 struct dentry *d_cpu;
7278 char cpu_dir[30]; /* 30 characters should be more than enough */
7279
7280 if (!d_percpu)
7281 return;
7282
7283 snprintf(cpu_dir, 30, "cpu%ld", cpu);
7284 d_cpu = tracefs_create_dir(cpu_dir, d_percpu);
7285 if (!d_cpu) {
7286 pr_warn("Could not create tracefs '%s' entry\n", cpu_dir);
7287 return;
7288 }
7289
7290 /* per cpu trace_pipe */
7291 trace_create_cpu_file("trace_pipe", 0444, d_cpu,
7292 tr, cpu, &tracing_pipe_fops);
7293
7294 /* per cpu trace */
7295 trace_create_cpu_file("trace", 0644, d_cpu,
7296 tr, cpu, &tracing_fops);
7297
7298 trace_create_cpu_file("trace_pipe_raw", 0444, d_cpu,
7299 tr, cpu, &tracing_buffers_fops);
7300
7301 trace_create_cpu_file("stats", 0444, d_cpu,
7302 tr, cpu, &tracing_stats_fops);
7303
7304 trace_create_cpu_file("buffer_size_kb", 0444, d_cpu,
7305 tr, cpu, &tracing_entries_fops);
7306
7307 #ifdef CONFIG_TRACER_SNAPSHOT
7308 trace_create_cpu_file("snapshot", 0644, d_cpu,
7309 tr, cpu, &snapshot_fops);
7310
7311 trace_create_cpu_file("snapshot_raw", 0444, d_cpu,
7312 tr, cpu, &snapshot_raw_fops);
7313 #endif
7314 }
7315
7316 #ifdef CONFIG_FTRACE_SELFTEST
7317 /* Let selftest have access to static functions in this file */
7318 #include "trace_selftest.c"
7319 #endif
7320
7321 static ssize_t
7322 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
7323 loff_t *ppos)
7324 {
7325 struct trace_option_dentry *topt = filp->private_data;
7326 char *buf;
7327
7328 if (topt->flags->val & topt->opt->bit)
7329 buf = "1\n";
7330 else
7331 buf = "0\n";
7332
7333 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7334 }
7335
7336 static ssize_t
7337 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
7338 loff_t *ppos)
7339 {
7340 struct trace_option_dentry *topt = filp->private_data;
7341 unsigned long val;
7342 int ret;
7343
7344 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7345 if (ret)
7346 return ret;
7347
7348 if (val != 0 && val != 1)
7349 return -EINVAL;
7350
7351 if (!!(topt->flags->val & topt->opt->bit) != val) {
7352 mutex_lock(&trace_types_lock);
7353 ret = __set_tracer_option(topt->tr, topt->flags,
7354 topt->opt, !val);
7355 mutex_unlock(&trace_types_lock);
7356 if (ret)
7357 return ret;
7358 }
7359
7360 *ppos += cnt;
7361
7362 return cnt;
7363 }
7364
7365
7366 static const struct file_operations trace_options_fops = {
7367 .open = tracing_open_generic,
7368 .read = trace_options_read,
7369 .write = trace_options_write,
7370 .llseek = generic_file_llseek,
7371 };
7372
7373 /*
7374 * In order to pass in both the trace_array descriptor as well as the index
7375 * to the flag that the trace option file represents, the trace_array
7376 * has a character array of trace_flags_index[], which holds the index
7377 * of the bit for the flag it represents. index[0] == 0, index[1] == 1, etc.
7378 * The address of this character array is passed to the flag option file
7379 * read/write callbacks.
7380 *
7381 * In order to extract both the index and the trace_array descriptor,
7382 * get_tr_index() uses the following algorithm.
7383 *
7384 * idx = *ptr;
7385 *
7386 * As the pointer itself contains the address of the index (remember
7387 * index[1] == 1).
7388 *
7389 * Then to get the trace_array descriptor, by subtracting that index
7390 * from the ptr, we get to the start of the index itself.
7391 *
7392 * ptr - idx == &index[0]
7393 *
7394 * Then a simple container_of() from that pointer gets us to the
7395 * trace_array descriptor.
7396 */
7397 static void get_tr_index(void *data, struct trace_array **ptr,
7398 unsigned int *pindex)
7399 {
7400 *pindex = *(unsigned char *)data;
7401
7402 *ptr = container_of(data - *pindex, struct trace_array,
7403 trace_flags_index);
7404 }
7405
7406 static ssize_t
7407 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
7408 loff_t *ppos)
7409 {
7410 void *tr_index = filp->private_data;
7411 struct trace_array *tr;
7412 unsigned int index;
7413 char *buf;
7414
7415 get_tr_index(tr_index, &tr, &index);
7416
7417 if (tr->trace_flags & (1 << index))
7418 buf = "1\n";
7419 else
7420 buf = "0\n";
7421
7422 return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
7423 }
7424
7425 static ssize_t
7426 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
7427 loff_t *ppos)
7428 {
7429 void *tr_index = filp->private_data;
7430 struct trace_array *tr;
7431 unsigned int index;
7432 unsigned long val;
7433 int ret;
7434
7435 get_tr_index(tr_index, &tr, &index);
7436
7437 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7438 if (ret)
7439 return ret;
7440
7441 if (val != 0 && val != 1)
7442 return -EINVAL;
7443
7444 mutex_lock(&trace_types_lock);
7445 ret = set_tracer_flag(tr, 1 << index, val);
7446 mutex_unlock(&trace_types_lock);
7447
7448 if (ret < 0)
7449 return ret;
7450
7451 *ppos += cnt;
7452
7453 return cnt;
7454 }
7455
7456 static const struct file_operations trace_options_core_fops = {
7457 .open = tracing_open_generic,
7458 .read = trace_options_core_read,
7459 .write = trace_options_core_write,
7460 .llseek = generic_file_llseek,
7461 };
7462
7463 struct dentry *trace_create_file(const char *name,
7464 umode_t mode,
7465 struct dentry *parent,
7466 void *data,
7467 const struct file_operations *fops)
7468 {
7469 struct dentry *ret;
7470
7471 ret = tracefs_create_file(name, mode, parent, data, fops);
7472 if (!ret)
7473 pr_warn("Could not create tracefs '%s' entry\n", name);
7474
7475 return ret;
7476 }
7477
7478
7479 static struct dentry *trace_options_init_dentry(struct trace_array *tr)
7480 {
7481 struct dentry *d_tracer;
7482
7483 if (tr->options)
7484 return tr->options;
7485
7486 d_tracer = tracing_get_dentry(tr);
7487 if (IS_ERR(d_tracer))
7488 return NULL;
7489
7490 tr->options = tracefs_create_dir("options", d_tracer);
7491 if (!tr->options) {
7492 pr_warn("Could not create tracefs directory 'options'\n");
7493 return NULL;
7494 }
7495
7496 return tr->options;
7497 }
7498
7499 static void
7500 create_trace_option_file(struct trace_array *tr,
7501 struct trace_option_dentry *topt,
7502 struct tracer_flags *flags,
7503 struct tracer_opt *opt)
7504 {
7505 struct dentry *t_options;
7506
7507 t_options = trace_options_init_dentry(tr);
7508 if (!t_options)
7509 return;
7510
7511 topt->flags = flags;
7512 topt->opt = opt;
7513 topt->tr = tr;
7514
7515 topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
7516 &trace_options_fops);
7517
7518 }
7519
7520 static void
7521 create_trace_option_files(struct trace_array *tr, struct tracer *tracer)
7522 {
7523 struct trace_option_dentry *topts;
7524 struct trace_options *tr_topts;
7525 struct tracer_flags *flags;
7526 struct tracer_opt *opts;
7527 int cnt;
7528 int i;
7529
7530 if (!tracer)
7531 return;
7532
7533 flags = tracer->flags;
7534
7535 if (!flags || !flags->opts)
7536 return;
7537
7538 /*
7539 * If this is an instance, only create flags for tracers
7540 * the instance may have.
7541 */
7542 if (!trace_ok_for_array(tracer, tr))
7543 return;
7544
7545 for (i = 0; i < tr->nr_topts; i++) {
7546 /* Make sure there's no duplicate flags. */
7547 if (WARN_ON_ONCE(tr->topts[i].tracer->flags == tracer->flags))
7548 return;
7549 }
7550
7551 opts = flags->opts;
7552
7553 for (cnt = 0; opts[cnt].name; cnt++)
7554 ;
7555
7556 topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
7557 if (!topts)
7558 return;
7559
7560 tr_topts = krealloc(tr->topts, sizeof(*tr->topts) * (tr->nr_topts + 1),
7561 GFP_KERNEL);
7562 if (!tr_topts) {
7563 kfree(topts);
7564 return;
7565 }
7566
7567 tr->topts = tr_topts;
7568 tr->topts[tr->nr_topts].tracer = tracer;
7569 tr->topts[tr->nr_topts].topts = topts;
7570 tr->nr_topts++;
7571
7572 for (cnt = 0; opts[cnt].name; cnt++) {
7573 create_trace_option_file(tr, &topts[cnt], flags,
7574 &opts[cnt]);
7575 WARN_ONCE(topts[cnt].entry == NULL,
7576 "Failed to create trace option: %s",
7577 opts[cnt].name);
7578 }
7579 }
7580
7581 static struct dentry *
7582 create_trace_option_core_file(struct trace_array *tr,
7583 const char *option, long index)
7584 {
7585 struct dentry *t_options;
7586
7587 t_options = trace_options_init_dentry(tr);
7588 if (!t_options)
7589 return NULL;
7590
7591 return trace_create_file(option, 0644, t_options,
7592 (void *)&tr->trace_flags_index[index],
7593 &trace_options_core_fops);
7594 }
7595
7596 static void create_trace_options_dir(struct trace_array *tr)
7597 {
7598 struct dentry *t_options;
7599 bool top_level = tr == &global_trace;
7600 int i;
7601
7602 t_options = trace_options_init_dentry(tr);
7603 if (!t_options)
7604 return;
7605
7606 for (i = 0; trace_options[i]; i++) {
7607 if (top_level ||
7608 !((1 << i) & TOP_LEVEL_TRACE_FLAGS))
7609 create_trace_option_core_file(tr, trace_options[i], i);
7610 }
7611 }
7612
7613 static ssize_t
7614 rb_simple_read(struct file *filp, char __user *ubuf,
7615 size_t cnt, loff_t *ppos)
7616 {
7617 struct trace_array *tr = filp->private_data;
7618 char buf[64];
7619 int r;
7620
7621 r = tracer_tracing_is_on(tr);
7622 r = sprintf(buf, "%d\n", r);
7623
7624 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7625 }
7626
7627 static ssize_t
7628 rb_simple_write(struct file *filp, const char __user *ubuf,
7629 size_t cnt, loff_t *ppos)
7630 {
7631 struct trace_array *tr = filp->private_data;
7632 struct ring_buffer *buffer = tr->trace_buffer.buffer;
7633 unsigned long val;
7634 int ret;
7635
7636 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7637 if (ret)
7638 return ret;
7639
7640 if (buffer) {
7641 mutex_lock(&trace_types_lock);
7642 if (!!val == tracer_tracing_is_on(tr)) {
7643 val = 0; /* do nothing */
7644 } else if (val) {
7645 tracer_tracing_on(tr);
7646 if (tr->current_trace->start)
7647 tr->current_trace->start(tr);
7648 } else {
7649 tracer_tracing_off(tr);
7650 if (tr->current_trace->stop)
7651 tr->current_trace->stop(tr);
7652 }
7653 mutex_unlock(&trace_types_lock);
7654 }
7655
7656 (*ppos)++;
7657
7658 return cnt;
7659 }
7660
7661 static const struct file_operations rb_simple_fops = {
7662 .open = tracing_open_generic_tr,
7663 .read = rb_simple_read,
7664 .write = rb_simple_write,
7665 .release = tracing_release_generic_tr,
7666 .llseek = default_llseek,
7667 };
7668
7669 static ssize_t
7670 buffer_percent_read(struct file *filp, char __user *ubuf,
7671 size_t cnt, loff_t *ppos)
7672 {
7673 struct trace_array *tr = filp->private_data;
7674 char buf[64];
7675 int r;
7676
7677 r = tr->buffer_percent;
7678 r = sprintf(buf, "%d\n", r);
7679
7680 return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
7681 }
7682
7683 static ssize_t
7684 buffer_percent_write(struct file *filp, const char __user *ubuf,
7685 size_t cnt, loff_t *ppos)
7686 {
7687 struct trace_array *tr = filp->private_data;
7688 unsigned long val;
7689 int ret;
7690
7691 ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
7692 if (ret)
7693 return ret;
7694
7695 if (val > 100)
7696 return -EINVAL;
7697
7698 if (!val)
7699 val = 1;
7700
7701 tr->buffer_percent = val;
7702
7703 (*ppos)++;
7704
7705 return cnt;
7706 }
7707
7708 static const struct file_operations buffer_percent_fops = {
7709 .open = tracing_open_generic_tr,
7710 .read = buffer_percent_read,
7711 .write = buffer_percent_write,
7712 .release = tracing_release_generic_tr,
7713 .llseek = default_llseek,
7714 };
7715
7716 struct dentry *trace_instance_dir;
7717
7718 static void
7719 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer);
7720
7721 static int
7722 allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
7723 {
7724 enum ring_buffer_flags rb_flags;
7725
7726 rb_flags = tr->trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
7727
7728 buf->tr = tr;
7729
7730 buf->buffer = ring_buffer_alloc(size, rb_flags);
7731 if (!buf->buffer)
7732 return -ENOMEM;
7733
7734 buf->data = alloc_percpu(struct trace_array_cpu);
7735 if (!buf->data) {
7736 ring_buffer_free(buf->buffer);
7737 buf->buffer = NULL;
7738 return -ENOMEM;
7739 }
7740
7741 /* Allocate the first page for all buffers */
7742 set_buffer_entries(&tr->trace_buffer,
7743 ring_buffer_size(tr->trace_buffer.buffer, 0));
7744
7745 return 0;
7746 }
7747
7748 static int allocate_trace_buffers(struct trace_array *tr, int size)
7749 {
7750 int ret;
7751
7752 ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
7753 if (ret)
7754 return ret;
7755
7756 #ifdef CONFIG_TRACER_MAX_TRACE
7757 ret = allocate_trace_buffer(tr, &tr->max_buffer,
7758 allocate_snapshot ? size : 1);
7759 if (WARN_ON(ret)) {
7760 ring_buffer_free(tr->trace_buffer.buffer);
7761 tr->trace_buffer.buffer = NULL;
7762 free_percpu(tr->trace_buffer.data);
7763 tr->trace_buffer.data = NULL;
7764 return -ENOMEM;
7765 }
7766 tr->allocated_snapshot = allocate_snapshot;
7767
7768 /*
7769 * Only the top level trace array gets its snapshot allocated
7770 * from the kernel command line.
7771 */
7772 allocate_snapshot = false;
7773 #endif
7774 return 0;
7775 }
7776
7777 static void free_trace_buffer(struct trace_buffer *buf)
7778 {
7779 if (buf->buffer) {
7780 ring_buffer_free(buf->buffer);
7781 buf->buffer = NULL;
7782 free_percpu(buf->data);
7783 buf->data = NULL;
7784 }
7785 }
7786
7787 static void free_trace_buffers(struct trace_array *tr)
7788 {
7789 if (!tr)
7790 return;
7791
7792 free_trace_buffer(&tr->trace_buffer);
7793
7794 #ifdef CONFIG_TRACER_MAX_TRACE
7795 free_trace_buffer(&tr->max_buffer);
7796 #endif
7797 }
7798
7799 static void init_trace_flags_index(struct trace_array *tr)
7800 {
7801 int i;
7802
7803 /* Used by the trace options files */
7804 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++)
7805 tr->trace_flags_index[i] = i;
7806 }
7807
7808 static void __update_tracer_options(struct trace_array *tr)
7809 {
7810 struct tracer *t;
7811
7812 for (t = trace_types; t; t = t->next)
7813 add_tracer_options(tr, t);
7814 }
7815
7816 static void update_tracer_options(struct trace_array *tr)
7817 {
7818 mutex_lock(&trace_types_lock);
7819 __update_tracer_options(tr);
7820 mutex_unlock(&trace_types_lock);
7821 }
7822
7823 static int instance_mkdir(const char *name)
7824 {
7825 struct trace_array *tr;
7826 int ret;
7827
7828 mutex_lock(&event_mutex);
7829 mutex_lock(&trace_types_lock);
7830
7831 ret = -EEXIST;
7832 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7833 if (tr->name && strcmp(tr->name, name) == 0)
7834 goto out_unlock;
7835 }
7836
7837 ret = -ENOMEM;
7838 tr = kzalloc(sizeof(*tr), GFP_KERNEL);
7839 if (!tr)
7840 goto out_unlock;
7841
7842 tr->name = kstrdup(name, GFP_KERNEL);
7843 if (!tr->name)
7844 goto out_free_tr;
7845
7846 if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL))
7847 goto out_free_tr;
7848
7849 tr->trace_flags = global_trace.trace_flags & ~ZEROED_TRACE_FLAGS;
7850
7851 cpumask_copy(tr->tracing_cpumask, cpu_all_mask);
7852
7853 raw_spin_lock_init(&tr->start_lock);
7854
7855 tr->max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
7856
7857 tr->current_trace = &nop_trace;
7858
7859 INIT_LIST_HEAD(&tr->systems);
7860 INIT_LIST_HEAD(&tr->events);
7861 INIT_LIST_HEAD(&tr->hist_vars);
7862
7863 if (allocate_trace_buffers(tr, trace_buf_size) < 0)
7864 goto out_free_tr;
7865
7866 tr->dir = tracefs_create_dir(name, trace_instance_dir);
7867 if (!tr->dir)
7868 goto out_free_tr;
7869
7870 ret = event_trace_add_tracer(tr->dir, tr);
7871 if (ret) {
7872 tracefs_remove_recursive(tr->dir);
7873 goto out_free_tr;
7874 }
7875
7876 ftrace_init_trace_array(tr);
7877
7878 init_tracer_tracefs(tr, tr->dir);
7879 init_trace_flags_index(tr);
7880 __update_tracer_options(tr);
7881
7882 list_add(&tr->list, &ftrace_trace_arrays);
7883
7884 mutex_unlock(&trace_types_lock);
7885 mutex_unlock(&event_mutex);
7886
7887 return 0;
7888
7889 out_free_tr:
7890 free_trace_buffers(tr);
7891 free_cpumask_var(tr->tracing_cpumask);
7892 kfree(tr->name);
7893 kfree(tr);
7894
7895 out_unlock:
7896 mutex_unlock(&trace_types_lock);
7897 mutex_unlock(&event_mutex);
7898
7899 return ret;
7900
7901 }
7902
7903 static int instance_rmdir(const char *name)
7904 {
7905 struct trace_array *tr;
7906 int found = 0;
7907 int ret;
7908 int i;
7909
7910 mutex_lock(&event_mutex);
7911 mutex_lock(&trace_types_lock);
7912
7913 ret = -ENODEV;
7914 list_for_each_entry(tr, &ftrace_trace_arrays, list) {
7915 if (tr->name && strcmp(tr->name, name) == 0) {
7916 found = 1;
7917 break;
7918 }
7919 }
7920 if (!found)
7921 goto out_unlock;
7922
7923 ret = -EBUSY;
7924 if (tr->ref || (tr->current_trace && tr->current_trace->ref))
7925 goto out_unlock;
7926
7927 list_del(&tr->list);
7928
7929 /* Disable all the flags that were enabled coming in */
7930 for (i = 0; i < TRACE_FLAGS_MAX_SIZE; i++) {
7931 if ((1 << i) & ZEROED_TRACE_FLAGS)
7932 set_tracer_flag(tr, 1 << i, 0);
7933 }
7934
7935 tracing_set_nop(tr);
7936 clear_ftrace_function_probes(tr);
7937 event_trace_del_tracer(tr);
7938 ftrace_clear_pids(tr);
7939 ftrace_destroy_function_files(tr);
7940 tracefs_remove_recursive(tr->dir);
7941 free_trace_buffers(tr);
7942
7943 for (i = 0; i < tr->nr_topts; i++) {
7944 kfree(tr->topts[i].topts);
7945 }
7946 kfree(tr->topts);
7947
7948 free_cpumask_var(tr->tracing_cpumask);
7949 kfree(tr->name);
7950 kfree(tr);
7951
7952 ret = 0;
7953
7954 out_unlock:
7955 mutex_unlock(&trace_types_lock);
7956 mutex_unlock(&event_mutex);
7957
7958 return ret;
7959 }
7960
7961 static __init void create_trace_instances(struct dentry *d_tracer)
7962 {
7963 trace_instance_dir = tracefs_create_instance_dir("instances", d_tracer,
7964 instance_mkdir,
7965 instance_rmdir);
7966 if (WARN_ON(!trace_instance_dir))
7967 return;
7968 }
7969
7970 static void
7971 init_tracer_tracefs(struct trace_array *tr, struct dentry *d_tracer)
7972 {
7973 struct trace_event_file *file;
7974 int cpu;
7975
7976 trace_create_file("available_tracers", 0444, d_tracer,
7977 tr, &show_traces_fops);
7978
7979 trace_create_file("current_tracer", 0644, d_tracer,
7980 tr, &set_tracer_fops);
7981
7982 trace_create_file("tracing_cpumask", 0644, d_tracer,
7983 tr, &tracing_cpumask_fops);
7984
7985 trace_create_file("trace_options", 0644, d_tracer,
7986 tr, &tracing_iter_fops);
7987
7988 trace_create_file("trace", 0644, d_tracer,
7989 tr, &tracing_fops);
7990
7991 trace_create_file("trace_pipe", 0444, d_tracer,
7992 tr, &tracing_pipe_fops);
7993
7994 trace_create_file("buffer_size_kb", 0644, d_tracer,
7995 tr, &tracing_entries_fops);
7996
7997 trace_create_file("buffer_total_size_kb", 0444, d_tracer,
7998 tr, &tracing_total_entries_fops);
7999
8000 trace_create_file("free_buffer", 0200, d_tracer,
8001 tr, &tracing_free_buffer_fops);
8002
8003 trace_create_file("trace_marker", 0220, d_tracer,
8004 tr, &tracing_mark_fops);
8005
8006 file = __find_event_file(tr, "ftrace", "print");
8007 if (file && file->dir)
8008 trace_create_file("trigger", 0644, file->dir, file,
8009 &event_trigger_fops);
8010 tr->trace_marker_file = file;
8011
8012 trace_create_file("trace_marker_raw", 0220, d_tracer,
8013 tr, &tracing_mark_raw_fops);
8014
8015 trace_create_file("trace_clock", 0644, d_tracer, tr,
8016 &trace_clock_fops);
8017
8018 trace_create_file("tracing_on", 0644, d_tracer,
8019 tr, &rb_simple_fops);
8020
8021 trace_create_file("timestamp_mode", 0444, d_tracer, tr,
8022 &trace_time_stamp_mode_fops);
8023
8024 tr->buffer_percent = 50;
8025
8026 trace_create_file("buffer_percent", 0444, d_tracer,
8027 tr, &buffer_percent_fops);
8028
8029 create_trace_options_dir(tr);
8030
8031 #if defined(CONFIG_TRACER_MAX_TRACE) || defined(CONFIG_HWLAT_TRACER)
8032 trace_create_file("tracing_max_latency", 0644, d_tracer,
8033 &tr->max_latency, &tracing_max_lat_fops);
8034 #endif
8035
8036 if (ftrace_create_function_files(tr, d_tracer))
8037 WARN(1, "Could not allocate function filter files");
8038
8039 #ifdef CONFIG_TRACER_SNAPSHOT
8040 trace_create_file("snapshot", 0644, d_tracer,
8041 tr, &snapshot_fops);
8042 #endif
8043
8044 for_each_tracing_cpu(cpu)
8045 tracing_init_tracefs_percpu(tr, cpu);
8046
8047 ftrace_init_tracefs(tr, d_tracer);
8048 }
8049
8050 static struct vfsmount *trace_automount(struct dentry *mntpt, void *ingore)
8051 {
8052 struct vfsmount *mnt;
8053 struct file_system_type *type;
8054
8055 /*
8056 * To maintain backward compatibility for tools that mount
8057 * debugfs to get to the tracing facility, tracefs is automatically
8058 * mounted to the debugfs/tracing directory.
8059 */
8060 type = get_fs_type("tracefs");
8061 if (!type)
8062 return NULL;
8063 mnt = vfs_submount(mntpt, type, "tracefs", NULL);
8064 put_filesystem(type);
8065 if (IS_ERR(mnt))
8066 return NULL;
8067 mntget(mnt);
8068
8069 return mnt;
8070 }
8071
8072 /**
8073 * tracing_init_dentry - initialize top level trace array
8074 *
8075 * This is called when creating files or directories in the tracing
8076 * directory. It is called via fs_initcall() by any of the boot up code
8077 * and expects to return the dentry of the top level tracing directory.
8078 */
8079 struct dentry *tracing_init_dentry(void)
8080 {
8081 struct trace_array *tr = &global_trace;
8082
8083 /* The top level trace array uses NULL as parent */
8084 if (tr->dir)
8085 return NULL;
8086
8087 if (WARN_ON(!tracefs_initialized()) ||
8088 (IS_ENABLED(CONFIG_DEBUG_FS) &&
8089 WARN_ON(!debugfs_initialized())))
8090 return ERR_PTR(-ENODEV);
8091
8092 /*
8093 * As there may still be users that expect the tracing
8094 * files to exist in debugfs/tracing, we must automount
8095 * the tracefs file system there, so older tools still
8096 * work with the newer kerenl.
8097 */
8098 tr->dir = debugfs_create_automount("tracing", NULL,
8099 trace_automount, NULL);
8100 if (!tr->dir) {
8101 pr_warn_once("Could not create debugfs directory 'tracing'\n");
8102 return ERR_PTR(-ENOMEM);
8103 }
8104
8105 return NULL;
8106 }
8107
8108 extern struct trace_eval_map *__start_ftrace_eval_maps[];
8109 extern struct trace_eval_map *__stop_ftrace_eval_maps[];
8110
8111 static void __init trace_eval_init(void)
8112 {
8113 int len;
8114
8115 len = __stop_ftrace_eval_maps - __start_ftrace_eval_maps;
8116 trace_insert_eval_map(NULL, __start_ftrace_eval_maps, len);
8117 }
8118
8119 #ifdef CONFIG_MODULES
8120 static void trace_module_add_evals(struct module *mod)
8121 {
8122 if (!mod->num_trace_evals)
8123 return;
8124
8125 /*
8126 * Modules with bad taint do not have events created, do
8127 * not bother with enums either.
8128 */
8129 if (trace_module_has_bad_taint(mod))
8130 return;
8131
8132 trace_insert_eval_map(mod, mod->trace_evals, mod->num_trace_evals);
8133 }
8134
8135 #ifdef CONFIG_TRACE_EVAL_MAP_FILE
8136 static void trace_module_remove_evals(struct module *mod)
8137 {
8138 union trace_eval_map_item *map;
8139 union trace_eval_map_item **last = &trace_eval_maps;
8140
8141 if (!mod->num_trace_evals)
8142 return;
8143
8144 mutex_lock(&trace_eval_mutex);
8145
8146 map = trace_eval_maps;
8147
8148 while (map) {
8149 if (map->head.mod == mod)
8150 break;
8151 map = trace_eval_jmp_to_tail(map);
8152 last = &map->tail.next;
8153 map = map->tail.next;
8154 }
8155 if (!map)
8156 goto out;
8157
8158 *last = trace_eval_jmp_to_tail(map)->tail.next;
8159 kfree(map);
8160 out:
8161 mutex_unlock(&trace_eval_mutex);
8162 }
8163 #else
8164 static inline void trace_module_remove_evals(struct module *mod) { }
8165 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
8166
8167 static int trace_module_notify(struct notifier_block *self,
8168 unsigned long val, void *data)
8169 {
8170 struct module *mod = data;
8171
8172 switch (val) {
8173 case MODULE_STATE_COMING:
8174 trace_module_add_evals(mod);
8175 break;
8176 case MODULE_STATE_GOING:
8177 trace_module_remove_evals(mod);
8178 break;
8179 }
8180
8181 return 0;
8182 }
8183
8184 static struct notifier_block trace_module_nb = {
8185 .notifier_call = trace_module_notify,
8186 .priority = 0,
8187 };
8188 #endif /* CONFIG_MODULES */
8189
8190 static __init int tracer_init_tracefs(void)
8191 {
8192 struct dentry *d_tracer;
8193
8194 trace_access_lock_init();
8195
8196 d_tracer = tracing_init_dentry();
8197 if (IS_ERR(d_tracer))
8198 return 0;
8199
8200 event_trace_init();
8201
8202 init_tracer_tracefs(&global_trace, d_tracer);
8203 ftrace_init_tracefs_toplevel(&global_trace, d_tracer);
8204
8205 trace_create_file("tracing_thresh", 0644, d_tracer,
8206 &global_trace, &tracing_thresh_fops);
8207
8208 trace_create_file("README", 0444, d_tracer,
8209 NULL, &tracing_readme_fops);
8210
8211 trace_create_file("saved_cmdlines", 0444, d_tracer,
8212 NULL, &tracing_saved_cmdlines_fops);
8213
8214 trace_create_file("saved_cmdlines_size", 0644, d_tracer,
8215 NULL, &tracing_saved_cmdlines_size_fops);
8216
8217 trace_create_file("saved_tgids", 0444, d_tracer,
8218 NULL, &tracing_saved_tgids_fops);
8219
8220 trace_eval_init();
8221
8222 trace_create_eval_file(d_tracer);
8223
8224 #ifdef CONFIG_MODULES
8225 register_module_notifier(&trace_module_nb);
8226 #endif
8227
8228 #ifdef CONFIG_DYNAMIC_FTRACE
8229 trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
8230 &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
8231 #endif
8232
8233 create_trace_instances(d_tracer);
8234
8235 update_tracer_options(&global_trace);
8236
8237 return 0;
8238 }
8239
8240 static int trace_panic_handler(struct notifier_block *this,
8241 unsigned long event, void *unused)
8242 {
8243 if (ftrace_dump_on_oops)
8244 ftrace_dump(ftrace_dump_on_oops);
8245 return NOTIFY_OK;
8246 }
8247
8248 static struct notifier_block trace_panic_notifier = {
8249 .notifier_call = trace_panic_handler,
8250 .next = NULL,
8251 .priority = 150 /* priority: INT_MAX >= x >= 0 */
8252 };
8253
8254 static int trace_die_handler(struct notifier_block *self,
8255 unsigned long val,
8256 void *data)
8257 {
8258 switch (val) {
8259 case DIE_OOPS:
8260 if (ftrace_dump_on_oops)
8261 ftrace_dump(ftrace_dump_on_oops);
8262 break;
8263 default:
8264 break;
8265 }
8266 return NOTIFY_OK;
8267 }
8268
8269 static struct notifier_block trace_die_notifier = {
8270 .notifier_call = trace_die_handler,
8271 .priority = 200
8272 };
8273
8274 /*
8275 * printk is set to max of 1024, we really don't need it that big.
8276 * Nothing should be printing 1000 characters anyway.
8277 */
8278 #define TRACE_MAX_PRINT 1000
8279
8280 /*
8281 * Define here KERN_TRACE so that we have one place to modify
8282 * it if we decide to change what log level the ftrace dump
8283 * should be at.
8284 */
8285 #define KERN_TRACE KERN_EMERG
8286
8287 void
8288 trace_printk_seq(struct trace_seq *s)
8289 {
8290 /* Probably should print a warning here. */
8291 if (s->seq.len >= TRACE_MAX_PRINT)
8292 s->seq.len = TRACE_MAX_PRINT;
8293
8294 /*
8295 * More paranoid code. Although the buffer size is set to
8296 * PAGE_SIZE, and TRACE_MAX_PRINT is 1000, this is just
8297 * an extra layer of protection.
8298 */
8299 if (WARN_ON_ONCE(s->seq.len >= s->seq.size))
8300 s->seq.len = s->seq.size - 1;
8301
8302 /* should be zero ended, but we are paranoid. */
8303 s->buffer[s->seq.len] = 0;
8304
8305 printk(KERN_TRACE "%s", s->buffer);
8306
8307 trace_seq_init(s);
8308 }
8309
8310 void trace_init_global_iter(struct trace_iterator *iter)
8311 {
8312 iter->tr = &global_trace;
8313 iter->trace = iter->tr->current_trace;
8314 iter->cpu_file = RING_BUFFER_ALL_CPUS;
8315 iter->trace_buffer = &global_trace.trace_buffer;
8316
8317 if (iter->trace && iter->trace->open)
8318 iter->trace->open(iter);
8319
8320 /* Annotate start of buffers if we had overruns */
8321 if (ring_buffer_overruns(iter->trace_buffer->buffer))
8322 iter->iter_flags |= TRACE_FILE_ANNOTATE;
8323
8324 /* Output in nanoseconds only if we are using a clock in nanoseconds. */
8325 if (trace_clocks[iter->tr->clock_id].in_ns)
8326 iter->iter_flags |= TRACE_FILE_TIME_IN_NS;
8327 }
8328
8329 void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
8330 {
8331 /* use static because iter can be a bit big for the stack */
8332 static struct trace_iterator iter;
8333 static atomic_t dump_running;
8334 struct trace_array *tr = &global_trace;
8335 unsigned int old_userobj;
8336 unsigned long flags;
8337 int cnt = 0, cpu;
8338
8339 /* Only allow one dump user at a time. */
8340 if (atomic_inc_return(&dump_running) != 1) {
8341 atomic_dec(&dump_running);
8342 return;
8343 }
8344
8345 /*
8346 * Always turn off tracing when we dump.
8347 * We don't need to show trace output of what happens
8348 * between multiple crashes.
8349 *
8350 * If the user does a sysrq-z, then they can re-enable
8351 * tracing with echo 1 > tracing_on.
8352 */
8353 tracing_off();
8354
8355 local_irq_save(flags);
8356 printk_nmi_direct_enter();
8357
8358 /* Simulate the iterator */
8359 trace_init_global_iter(&iter);
8360
8361 for_each_tracing_cpu(cpu) {
8362 atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8363 }
8364
8365 old_userobj = tr->trace_flags & TRACE_ITER_SYM_USEROBJ;
8366
8367 /* don't look at user memory in panic mode */
8368 tr->trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
8369
8370 switch (oops_dump_mode) {
8371 case DUMP_ALL:
8372 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8373 break;
8374 case DUMP_ORIG:
8375 iter.cpu_file = raw_smp_processor_id();
8376 break;
8377 case DUMP_NONE:
8378 goto out_enable;
8379 default:
8380 printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
8381 iter.cpu_file = RING_BUFFER_ALL_CPUS;
8382 }
8383
8384 printk(KERN_TRACE "Dumping ftrace buffer:\n");
8385
8386 /* Did function tracer already get disabled? */
8387 if (ftrace_is_dead()) {
8388 printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
8389 printk("# MAY BE MISSING FUNCTION EVENTS\n");
8390 }
8391
8392 /*
8393 * We need to stop all tracing on all CPUS to read the
8394 * the next buffer. This is a bit expensive, but is
8395 * not done often. We fill all what we can read,
8396 * and then release the locks again.
8397 */
8398
8399 while (!trace_empty(&iter)) {
8400
8401 if (!cnt)
8402 printk(KERN_TRACE "---------------------------------\n");
8403
8404 cnt++;
8405
8406 /* reset all but tr, trace, and overruns */
8407 memset(&iter.seq, 0,
8408 sizeof(struct trace_iterator) -
8409 offsetof(struct trace_iterator, seq));
8410 iter.iter_flags |= TRACE_FILE_LAT_FMT;
8411 iter.pos = -1;
8412
8413 if (trace_find_next_entry_inc(&iter) != NULL) {
8414 int ret;
8415
8416 ret = print_trace_line(&iter);
8417 if (ret != TRACE_TYPE_NO_CONSUME)
8418 trace_consume(&iter);
8419 }
8420 touch_nmi_watchdog();
8421
8422 trace_printk_seq(&iter.seq);
8423 }
8424
8425 if (!cnt)
8426 printk(KERN_TRACE " (ftrace buffer empty)\n");
8427 else
8428 printk(KERN_TRACE "---------------------------------\n");
8429
8430 out_enable:
8431 tr->trace_flags |= old_userobj;
8432
8433 for_each_tracing_cpu(cpu) {
8434 atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
8435 }
8436 atomic_dec(&dump_running);
8437 printk_nmi_direct_exit();
8438 local_irq_restore(flags);
8439 }
8440 EXPORT_SYMBOL_GPL(ftrace_dump);
8441
8442 int trace_run_command(const char *buf, int (*createfn)(int, char **))
8443 {
8444 char **argv;
8445 int argc, ret;
8446
8447 argc = 0;
8448 ret = 0;
8449 argv = argv_split(GFP_KERNEL, buf, &argc);
8450 if (!argv)
8451 return -ENOMEM;
8452
8453 if (argc)
8454 ret = createfn(argc, argv);
8455
8456 argv_free(argv);
8457
8458 return ret;
8459 }
8460
8461 #define WRITE_BUFSIZE 4096
8462
8463 ssize_t trace_parse_run_command(struct file *file, const char __user *buffer,
8464 size_t count, loff_t *ppos,
8465 int (*createfn)(int, char **))
8466 {
8467 char *kbuf, *buf, *tmp;
8468 int ret = 0;
8469 size_t done = 0;
8470 size_t size;
8471
8472 kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL);
8473 if (!kbuf)
8474 return -ENOMEM;
8475
8476 while (done < count) {
8477 size = count - done;
8478
8479 if (size >= WRITE_BUFSIZE)
8480 size = WRITE_BUFSIZE - 1;
8481
8482 if (copy_from_user(kbuf, buffer + done, size)) {
8483 ret = -EFAULT;
8484 goto out;
8485 }
8486 kbuf[size] = '\0';
8487 buf = kbuf;
8488 do {
8489 tmp = strchr(buf, '\n');
8490 if (tmp) {
8491 *tmp = '\0';
8492 size = tmp - buf + 1;
8493 } else {
8494 size = strlen(buf);
8495 if (done + size < count) {
8496 if (buf != kbuf)
8497 break;
8498 /* This can accept WRITE_BUFSIZE - 2 ('\n' + '\0') */
8499 pr_warn("Line length is too long: Should be less than %d\n",
8500 WRITE_BUFSIZE - 2);
8501 ret = -EINVAL;
8502 goto out;
8503 }
8504 }
8505 done += size;
8506
8507 /* Remove comments */
8508 tmp = strchr(buf, '#');
8509
8510 if (tmp)
8511 *tmp = '\0';
8512
8513 ret = trace_run_command(buf, createfn);
8514 if (ret)
8515 goto out;
8516 buf += size;
8517
8518 } while (done < count);
8519 }
8520 ret = done;
8521
8522 out:
8523 kfree(kbuf);
8524
8525 return ret;
8526 }
8527
8528 __init static int tracer_alloc_buffers(void)
8529 {
8530 int ring_buf_size;
8531 int ret = -ENOMEM;
8532
8533 /*
8534 * Make sure we don't accidently add more trace options
8535 * than we have bits for.
8536 */
8537 BUILD_BUG_ON(TRACE_ITER_LAST_BIT > TRACE_FLAGS_MAX_SIZE);
8538
8539 if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
8540 goto out;
8541
8542 if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL))
8543 goto out_free_buffer_mask;
8544
8545 /* Only allocate trace_printk buffers if a trace_printk exists */
8546 if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
8547 /* Must be called before global_trace.buffer is allocated */
8548 trace_printk_init_buffers();
8549
8550 /* To save memory, keep the ring buffer size to its minimum */
8551 if (ring_buffer_expanded)
8552 ring_buf_size = trace_buf_size;
8553 else
8554 ring_buf_size = 1;
8555
8556 cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
8557 cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask);
8558
8559 raw_spin_lock_init(&global_trace.start_lock);
8560
8561 /*
8562 * The prepare callbacks allocates some memory for the ring buffer. We
8563 * don't free the buffer if the if the CPU goes down. If we were to free
8564 * the buffer, then the user would lose any trace that was in the
8565 * buffer. The memory will be removed once the "instance" is removed.
8566 */
8567 ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
8568 "trace/RB:preapre", trace_rb_cpu_prepare,
8569 NULL);
8570 if (ret < 0)
8571 goto out_free_cpumask;
8572 /* Used for event triggers */
8573 ret = -ENOMEM;
8574 temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
8575 if (!temp_buffer)
8576 goto out_rm_hp_state;
8577
8578 if (trace_create_savedcmd() < 0)
8579 goto out_free_temp_buffer;
8580
8581 /* TODO: make the number of buffers hot pluggable with CPUS */
8582 if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
8583 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
8584 WARN_ON(1);
8585 goto out_free_savedcmd;
8586 }
8587
8588 if (global_trace.buffer_disabled)
8589 tracing_off();
8590
8591 if (trace_boot_clock) {
8592 ret = tracing_set_clock(&global_trace, trace_boot_clock);
8593 if (ret < 0)
8594 pr_warn("Trace clock %s not defined, going back to default\n",
8595 trace_boot_clock);
8596 }
8597
8598 /*
8599 * register_tracer() might reference current_trace, so it
8600 * needs to be set before we register anything. This is
8601 * just a bootstrap of current_trace anyway.
8602 */
8603 global_trace.current_trace = &nop_trace;
8604
8605 global_trace.max_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
8606
8607 ftrace_init_global_array_ops(&global_trace);
8608
8609 init_trace_flags_index(&global_trace);
8610
8611 register_tracer(&nop_trace);
8612
8613 /* Function tracing may start here (via kernel command line) */
8614 init_function_trace();
8615
8616 /* All seems OK, enable tracing */
8617 tracing_disabled = 0;
8618
8619 atomic_notifier_chain_register(&panic_notifier_list,
8620 &trace_panic_notifier);
8621
8622 register_die_notifier(&trace_die_notifier);
8623
8624 global_trace.flags = TRACE_ARRAY_FL_GLOBAL;
8625
8626 INIT_LIST_HEAD(&global_trace.systems);
8627 INIT_LIST_HEAD(&global_trace.events);
8628 INIT_LIST_HEAD(&global_trace.hist_vars);
8629 list_add(&global_trace.list, &ftrace_trace_arrays);
8630
8631 apply_trace_boot_options();
8632
8633 register_snapshot_cmd();
8634
8635 return 0;
8636
8637 out_free_savedcmd:
8638 free_saved_cmdlines_buffer(savedcmd);
8639 out_free_temp_buffer:
8640 ring_buffer_free(temp_buffer);
8641 out_rm_hp_state:
8642 cpuhp_remove_multi_state(CPUHP_TRACE_RB_PREPARE);
8643 out_free_cpumask:
8644 free_cpumask_var(global_trace.tracing_cpumask);
8645 out_free_buffer_mask:
8646 free_cpumask_var(tracing_buffer_mask);
8647 out:
8648 return ret;
8649 }
8650
8651 void __init early_trace_init(void)
8652 {
8653 if (tracepoint_printk) {
8654 tracepoint_print_iter =
8655 kmalloc(sizeof(*tracepoint_print_iter), GFP_KERNEL);
8656 if (WARN_ON(!tracepoint_print_iter))
8657 tracepoint_printk = 0;
8658 else
8659 static_key_enable(&tracepoint_printk_key.key);
8660 }
8661 tracer_alloc_buffers();
8662 }
8663
8664 void __init trace_init(void)
8665 {
8666 trace_event_init();
8667 }
8668
8669 __init static int clear_boot_tracer(void)
8670 {
8671 /*
8672 * The default tracer at boot buffer is an init section.
8673 * This function is called in lateinit. If we did not
8674 * find the boot tracer, then clear it out, to prevent
8675 * later registration from accessing the buffer that is
8676 * about to be freed.
8677 */
8678 if (!default_bootup_tracer)
8679 return 0;
8680
8681 printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
8682 default_bootup_tracer);
8683 default_bootup_tracer = NULL;
8684
8685 return 0;
8686 }
8687
8688 fs_initcall(tracer_init_tracefs);
8689 late_initcall_sync(clear_boot_tracer);
8690
8691 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
8692 __init static int tracing_set_default_clock(void)
8693 {
8694 /* sched_clock_stable() is determined in late_initcall */
8695 if (!trace_boot_clock && !sched_clock_stable()) {
8696 printk(KERN_WARNING
8697 "Unstable clock detected, switching default tracing clock to \"global\"\n"
8698 "If you want to keep using the local clock, then add:\n"
8699 " \"trace_clock=local\"\n"
8700 "on the kernel command line\n");
8701 tracing_set_clock(&global_trace, "global");
8702 }
8703
8704 return 0;
8705 }
8706 late_initcall_sync(tracing_set_default_clock);
8707 #endif