// SPDX-License-Identifier: GPL-2.0-only
/*
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 *
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 */

#include "util/annotate.h"
#include "util/bpf-event.h"
#include "util/config.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/mmap.h"
#include "util/session.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/callchain.h"
#include "util/cpumap.h"
#include "util/sort.h"
#include "util/string2.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"

#include "util/debug.h"
#include "util/ordered-events.h"

#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/utsname.h>

#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/err.h>

#include <linux/ctype.h>
#include <perf/mmap.h>

static volatile int done;
static volatile int resize;

#define HEADER_LINE_NR  5

static void perf_top__update_print_entries(struct perf_top *top)
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}

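/*
 * SIGWINCH handling: winch_sig() only sets the 'resize' flag; the main
 * loop in __cmd_top() notices it and calls perf_top__resize() to pick up
 * the new terminal dimensions.
 */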
static void winch_sig(int sig __maybe_unused)
{
	resize = 1;
}

static void perf_top__resize(struct perf_top *top)
{
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
}

static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct evsel *evsel;
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	evsel = hists_to_evsel(he->hists);

	sym = he->ms.sym;
	map = he->ms.map;

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS &&
	    !dso__is_kcore(map->dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	pthread_mutex_lock(&notes->lock);

	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
		pthread_mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
	if (err == 0) {
		top->sym_filter_entry = he;
	} else {
		char msg[BUFSIZ];

		symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	}

	pthread_mutex_unlock(&notes->lock);
	return err;
}

static void __zero_source_counters(struct hist_entry *he)
{
	struct symbol *sym = he->ms.sym;

	symbol__annotate_zero_histograms(sym);
}

static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr:   %" PRIx64 "\n"
		    "DSO:    %s %c\n"
		    "Map:    %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch:   %s\n"
		    "Kernel: %s\n"
		    "Tools:  %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, map->dso->long_name, dso__symtab_origin(map->dso),
		    map->start, map->end, sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL  ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	if (use_browser <= 0)
		sleep(5);

	map->erange_warned = true;
}

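/*
 * Called with he->hists->lock held (see hist_iter__top_callback()); the
 * per-symbol annotation histogram itself is protected by notes->lock,
 * taken with trylock so the sampling path never blocks on the display
 * thread.
 */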
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					struct perf_sample *sample,
					struct evsel *evsel, u64 ip)
{
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	int err = 0;

	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
		return;

	notes = symbol__annotation(sym);

	if (pthread_mutex_trylock(&notes->lock))
		return;

	err = hist_entry__inc_addr_samples(he, sample, evsel, ip);

	pthread_mutex_unlock(&notes->lock);

	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		pthread_mutex_unlock(&he->hists->lock);

		if (err == -ERANGE && !he->ms.map->erange_warned)
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		pthread_mutex_lock(&he->hists->lock);
	}
}

static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct evsel *evsel;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	evsel = hists_to_evsel(he->hists);

	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	pthread_mutex_lock(&notes->lock);

	symbol__calc_percent(symbol, evsel);

	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", perf_evsel__name(top->sym_evsel), symbol->name);
	printf("  Events  Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);

	more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);

	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
	}
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	pthread_mutex_unlock(&notes->lock);
}

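/*
 * Rebuild the output tree for every event: decay (or zero) the existing
 * entries, re-collapse, match/link group members to their leader, and
 * finally resort each hists for display.
 */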
static void perf_top__resort_hists(struct perf_top *t)
{
	struct evlist *evlist = t->evlist;
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		/*
		 * unlink existing entries so that they can be linked
		 * in a correct order in hists__match() below.
		 */
		hists__unlink(hists);

		if (evlist->enabled) {
			if (t->zero) {
				hists__delete_entries(hists);
			} else {
				hists__decay_entries(hists, t->hide_user_symbols,
						     t->hide_kernel_symbols);
			}
		}

		hists__collapse_resort(hists, NULL);

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group &&
		    !perf_evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(pos->leader);

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
		}
	}

	evlist__for_each_entry(evlist, pos) {
		perf_evsel__output_resort(pos, NULL);
	}
}

static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;
	struct evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	if (!top->record_opts.overwrite &&
	    (hists->stats.nr_lost_warned !=
	    hists->stats.nr_events[PERF_RECORD_LOST])) {
		hists->stats.nr_lost_warned =
			      hists->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      hists->stats.nr_lost_warned);
		++printed;
	}

	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	perf_top__resort_hists(top);

	hists__output_recalc_col_len(hists, top->print_entries - printed);
	putchar('\n');
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		       top->min_percent, stdout, !symbol_conf.use_callchain);
}

static void prompt_integer(int *target, const char *msg)
{
	char *buf = malloc(0), *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		return;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	p = buf;
	while (*p) {
		if (!isdigit(*p))
			goto out_free;
		p++;
	}
	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}

static void prompt_percent(int *target, const char *msg)
{
	int tmp = 0;

	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
		*target = tmp;
}

static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	next = rb_first_cached(&hists->entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}

static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d]     display refresh delay.             \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e]     display entries (lines).           \t(%d)\n", top->print_entries);

	if (top->evlist->core.nr_entries > 1)
		fprintf(stdout, "\t[E]     active event counter.              \t(%s)\n", perf_evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f]     profile display filter (count).    \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F]     annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
	fprintf(stdout, "\t[s]     annotate symbol.                   \t(%s)\n", name ?: "NULL");
	fprintf(stdout, "\t[S]     stop annotation.\n");

	fprintf(stdout,
		"\t[K]     hide kernel symbols.             \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U]     hide user symbols.               \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z]     toggle sample zeroing.             \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ]    quit.\n");
}

static int perf_top__key_mapped(struct perf_top *top, int c)
{
	switch (c) {
		case 'd':
		case 'e':
		case 'f':
		case 'z':
		case 'q':
		case 'Q':
		case 'K':
		case 'U':
		case 'F':
		case 's':
		case 'S':
			return 1;
		case 'E':
			return top->evlist->core.nr_entries > 1 ? 1 : 0;
		default:
			break;
	}

	return 0;
}

static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
		case 'd':
			prompt_integer(&top->delay_secs, "Enter display delay");
			if (top->delay_secs < 1)
				top->delay_secs = 1;
			break;
		case 'e':
			prompt_integer(&top->print_entries, "Enter display entries (lines)");
			if (top->print_entries == 0) {
				perf_top__resize(top);
				signal(SIGWINCH, winch_sig);
			} else {
				signal(SIGWINCH, SIG_DFL);
			}
			break;
		case 'E':
			if (top->evlist->core.nr_entries > 1) {
				/* Select 0 as the default event: */
				int counter = 0;

				fprintf(stderr, "\nAvailable events:");

				evlist__for_each_entry(top->evlist, top->sym_evsel)
					fprintf(stderr, "\n\t%d %s", top->sym_evsel->idx, perf_evsel__name(top->sym_evsel));

				prompt_integer(&counter, "Enter details event counter");

				if (counter >= top->evlist->core.nr_entries) {
					top->sym_evsel = evlist__first(top->evlist);
					fprintf(stderr, "Sorry, no such event, using %s.\n", perf_evsel__name(top->sym_evsel));
					sleep(1);
					break;
				}
				evlist__for_each_entry(top->evlist, top->sym_evsel)
					if (top->sym_evsel->idx == counter)
						break;
			} else
				top->sym_evsel = evlist__first(top->evlist);
			break;
		case 'f':
			prompt_integer(&top->count_filter, "Enter display event count filter");
			break;
		case 'F':
			prompt_percent(&top->annotation_opts.min_pcnt,
				       "Enter details display event filter (percent)");
			break;
		case 'K':
			top->hide_kernel_symbols = !top->hide_kernel_symbols;
			break;
		case 'q':
		case 'Q':
			printf("exiting.\n");
			if (top->dump_symtab)
				perf_session__fprintf_dsos(top->session, stderr);
			ret = false;
			break;
		case 's':
			perf_top__prompt_symbol(top, "Enter details symbol");
			break;
		case 'S':
			if (!top->sym_filter_entry)
				break;
			else {
				struct hist_entry *syme = top->sym_filter_entry;

				top->sym_filter_entry = NULL;
				__zero_source_counters(syme);
			}
			break;
		case 'U':
			top->hide_user_symbols = !top->hide_user_symbols;
			break;
		case 'z':
			top->zero = !top->zero;
			break;
		default:
			break;
	}

	return ret;
}

static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	perf_top__resort_hists(t);

	if (t->lost || t->drop)
		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C))\n");
}

static void stop_top(void)
{
	session_done = 1;
	done = 1;
}

static void *display_thread_tui(void *arg)
{
	struct evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer		= perf_top__sort_new_samples,
		.arg		= top,
		.refresh	= top->delay_secs,
	};
	int ret;

	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);

repeat:
	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * Zooming in/out UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	}

	ret = perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
					    top->min_percent,
					    &top->session->header.env,
					    !top->record_opts.overwrite,
					    &top->annotation_opts);

	if (ret == K_RELOAD) {
		top->zero = true;
		goto repeat;
	} else
		stop_top();

	return NULL;
}

static void display_sig(int sig __maybe_unused)
{
	stop_top();
}

static void display_setup_sig(void)
{
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT,  display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}

static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2).  This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);

	display_setup_sig();
	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
	set_term_quiet_input(&save);
	if (poll(&stdin_poll, 1, 0) > 0)
		getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			__fallthrough;
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			stop_top();
		}
	}

	tcsetattr(0, TCSAFLUSH, &save);
	return NULL;
}

static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
{
	struct perf_top *top = arg;
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;

	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, he, iter->sample, evsel, al->addr);

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
			     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
			     NULL);
	return 0;
}

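/*
 * Main per-sample path: resolve the sample to a machine/map/symbol and
 * feed it into the hists of the evsel that generated it, warning once
 * when kptr_restrict or a missing/broken vmlinux prevents kernel samples
 * from being resolved.
 */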
static void perf_event__process_sample(struct perf_tool *tool,
				       const union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;
	int err;

	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
				sample->pid);
			intlist__add(seen, sample->pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->evlist->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	if (machine__resolve(machine, &al, sample) < 0)
		return;

	if (!machine->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		if (!perf_evlist__exclude_kernel(top->session->evlist)) {
			ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Kernel%s samples will not be resolved.\n",
				    al.map && map__has_symbols(al.map) ?
				    " modules" : "");
			if (use_browser <= 0)
				sleep(5);
		}
		machine->kptr_restrict_warned = true;
	}

	if (al.sym == NULL && al.map != NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
		    __map__is_kernel(al.map) && map__has_symbols(al.map)) {
			if (symbol_conf.vmlinux_name) {
				char serr[256];

				dso__strerror_load(al.map->dso, serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.evsel		= evsel,
			.sample 	= sample,
			.add_entry_cb 	= hist_iter__top_callback,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		pthread_mutex_lock(&hists->lock);

		err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
		if (err < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");

		pthread_mutex_unlock(&hists->lock);
	}

	addr_location__put(&al);
}

static void
perf_top__process_lost(struct perf_top *top, union perf_event *event,
		       struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	top->lost += event->lost.lost;
	top->lost_total += event->lost.lost;
	hists->stats.total_lost += event->lost.lost;
}

static void
perf_top__process_lost_samples(struct perf_top *top,
			       union perf_event *event,
			       struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	top->lost += event->lost_samples.lost;
	top->lost_total += event->lost_samples.lost;
	hists->stats.total_lost_samples += event->lost_samples.lost;
}

static u64 last_timestamp;

static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct mmap *md;
	union perf_event *event;

	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
	if (perf_mmap__read_init(&md->core) < 0)
		return;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		int ret;

		ret = perf_evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
		if (ret && ret != -1)
			break;

		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0);
		if (ret)
			break;

		perf_mmap__consume(&md->core);

		if (top->qe.rotate) {
			pthread_mutex_lock(&top->qe.mutex);
			top->qe.rotate = false;
			pthread_cond_signal(&top->qe.cond);
			pthread_mutex_unlock(&top->qe.mutex);
		}
	}

	perf_mmap__read_done(&md->core);
}

static void perf_top__mmap_read(struct perf_top *top)
{
	bool overwrite = top->record_opts.overwrite;
	struct evlist *evlist = top->evlist;
	int i;

	if (overwrite)
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

	for (i = 0; i < top->evlist->core.nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);

	if (overwrite) {
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
		perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
	}
}

/*
 * Check per-event overwrite term.
 * perf top should support consistent term for all events.
 * - All events don't have per-event term
 *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
 *   Nothing change, return 0.
 * - All events have same per-event term
 *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
 *   Using the per-event setting to replace the opts->overwrite if
 *   they are different, then return 0.
 * - Events have different per-event term
 *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
 *   Return -1.
 * - Some of the events set a per-event term, but some do not.
 *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
 *   Return -1.
 */
static int perf_top__overwrite_check(struct perf_top *top)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct perf_evsel_config_term *term;
	struct list_head *config_terms;
	struct evsel *evsel;
	int set, overwrite = -1;

	evlist__for_each_entry(evlist, evsel) {
		set = -1;
		config_terms = &evsel->config_terms;
		list_for_each_entry(term, config_terms, list) {
			if (term->type == PERF_EVSEL__CONFIG_TERM_OVERWRITE)
				set = term->val.overwrite ? 1 : 0;
		}

		/* no term for current and previous event (likely) */
		if ((overwrite < 0) && (set < 0))
			continue;

		/* has term for both current and previous event, compare */
		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
			return -1;

		/* no term for current event but has term for previous one */
		if ((overwrite >= 0) && (set < 0))
			return -1;

		/* has term for current event */
		if ((overwrite < 0) && (set >= 0)) {
			/* if it's first event, set overwrite */
			if (evsel == evlist__first(evlist))
				overwrite = set;
			else
				return -1;
		}
	}

	if ((overwrite >= 0) && (opts->overwrite != overwrite))
		opts->overwrite = overwrite;

	return 0;
}

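/*
 * When the kernel rejects write_backward (overwrite) mmaps, clear the
 * attribute on every event and retry the whole evlist in non-overwrite
 * mode; only a failure of the first event triggers the fallback.
 */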
static int perf_top_overwrite_fallback(struct perf_top *top,
				       struct evsel *evsel)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct evsel *counter;

	if (!opts->overwrite)
		return 0;

	/* only fall back when first event fails */
	if (evsel != evlist__first(evlist))
		return 0;

	evlist__for_each_entry(evlist, counter)
		counter->core.attr.write_backward = false;
	opts->overwrite = false;
	pr_debug2("fall back to non-overwrite mode\n");
	return 1;
}

static int perf_top__start_counters(struct perf_top *top)
{
	char msg[BUFSIZ];
	struct evsel *counter;
	struct evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;

	if (perf_top__overwrite_check(top)) {
		ui__error("perf top only supports a consistent per-event "
			  "overwrite setting for all events\n");
		goto out_err;
	}

	perf_evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, counter) {
try_again:
		if (evsel__open(counter, top->evlist->core.cpus,
				top->evlist->core.threads) < 0) {

			/*
			 * Specially handle overwrite fall back.
			 * Because perf top is the only tool which has
			 * overwrite mode by default, support
			 * both overwrite and non-overwrite mode, and
			 * require consistent mode for all events.
			 *
			 * May move it to generic code with more tools
			 * have similar attribute.
			 */
			if (perf_missing_features.write_backward &&
			    perf_top_overwrite_fallback(top, counter))
				goto try_again;

			if (perf_evsel__fallback(counter, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			perf_evsel__open_strerror(counter, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			  errno, str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}

static int callchain_param__setup_sample_type(struct callchain_param *callchain)
{
	if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
			ui__error("Can't register callchain params.\n");
			return -EINVAL;
		}
	}

	return 0;
}

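/*
 * Two ordered_events queues are used: the reader fills top->qe.in while
 * process_thread() flushes the other one.  rotate_queues() swaps them,
 * and the rotate/cond handshake in perf_top__mmap_read_idx() keeps both
 * threads in step.
 */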
static struct ordered_events *rotate_queues(struct perf_top *top)
{
	struct ordered_events *in = top->qe.in;

	if (top->qe.in == &top->qe.data[1])
		top->qe.in = &top->qe.data[0];
	else
		top->qe.in = &top->qe.data[1];

	return in;
}

static void *process_thread(void *arg)
{
	struct perf_top *top = arg;

	while (!done) {
		struct ordered_events *out, *in = top->qe.in;

		if (!in->nr_events) {
			usleep(100);
			continue;
		}

		out = rotate_queues(top);

		pthread_mutex_lock(&top->qe.mutex);
		top->qe.rotate = true;
		pthread_cond_wait(&top->qe.cond, &top->qe.mutex);
		pthread_mutex_unlock(&top->qe.mutex);

		if (ordered_events__flush(out, OE_FLUSH__TOP))
			pr_err("failed to process events\n");
	}

	return NULL;
}

/*
 * Allow only 'top->delay_secs' seconds behind samples.
 */
static int should_drop(struct ordered_event *qevent, struct perf_top *top)
{
	union perf_event *event = qevent->event;
	u64 delay_timestamp;

	if (event->header.type != PERF_RECORD_SAMPLE)
		return false;

	delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
	return delay_timestamp < last_timestamp;
}

static int deliver_event(struct ordered_events *qe,
			 struct ordered_event *qevent)
{
	struct perf_top *top = qe->data;
	struct evlist *evlist = top->evlist;
	struct perf_session *session = top->session;
	union perf_event *event = qevent->event;
	struct perf_sample sample;
	struct evsel *evsel;
	struct machine *machine;
	int ret = -1;

	if (should_drop(qevent, top)) {
		top->drop++;
		top->drop_total++;
		return 0;
	}

	ret = perf_evlist__parse_sample(evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		goto next_event;
	}

	evsel = perf_evlist__id2evsel(session->evlist, sample.id);
	assert(evsel != NULL);

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evswitch__discard(&top->evswitch, evsel))
			return 0;
		++top->samples;
	}

	switch (sample.cpumode) {
	case PERF_RECORD_MISC_USER:
		++top->us_samples;
		if (top->hide_user_symbols)
			goto next_event;
		machine = &session->machines.host;
		break;
	case PERF_RECORD_MISC_KERNEL:
		++top->kernel_samples;
		if (top->hide_kernel_symbols)
			goto next_event;
		machine = &session->machines.host;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++top->guest_kernel_samples;
		machine = perf_session__find_machine(session,
						     sample.pid);
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		++top->guest_us_samples;
		/*
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		 */
		goto next_event;
	default:
		if (event->header.type == PERF_RECORD_SAMPLE)
			goto next_event;
		machine = &session->machines.host;
		break;
	}

	if (event->header.type == PERF_RECORD_SAMPLE) {
		perf_event__process_sample(&top->tool, event, evsel,
					   &sample, machine);
	} else if (event->header.type == PERF_RECORD_LOST) {
		perf_top__process_lost(top, event, evsel);
	} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
		perf_top__process_lost_samples(top, event, evsel);
	} else if (event->header.type < PERF_RECORD_MAX) {
		hists__inc_nr_events(evsel__hists(evsel), event->header.type);
		machine__process_event(machine, event, &sample);
	} else
		++session->evlist->stats.nr_unknown_events;

	ret = 0;
next_event:
	return ret;
}

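/*
 * Both queues deliver through deliver_event() and copy events on queue,
 * since the mmap'ed ring buffer data is consumed right after queueing.
 */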
static void init_process_thread(struct perf_top *top)
{
	ordered_events__init(&top->qe.data[0], deliver_event, top);
	ordered_events__init(&top->qe.data[1], deliver_event, top);
	ordered_events__set_copy_on_queue(&top->qe.data[0], true);
	ordered_events__set_copy_on_queue(&top->qe.data[1], true);
	top->qe.in = &top->qe.data[0];
	pthread_mutex_init(&top->qe.mutex, NULL);
	pthread_cond_init(&top->qe.cond, NULL);
}

static int __cmd_top(struct perf_top *top)
{
	struct record_opts *opts = &top->record_opts;
	pthread_t thread, thread_process;
	int ret;

	if (!top->annotation_opts.objdump_path) {
		ret = perf_env__lookup_objdump(&top->session->header.env,
					       &top->annotation_opts.objdump_path);
		if (ret)
			return ret;
	}

	ret = callchain_param__setup_sample_type(&callchain_param);
	if (ret)
		return ret;

	if (perf_session__register_idle_thread(top->session) < 0)
		return ret;

	if (top->nr_threads_synthesize > 1)
		perf_set_multithreaded();

	init_process_thread(top);

	if (opts->record_namespaces)
		top->tool.namespace_events = true;
	if (opts->record_cgroup) {
#ifdef HAVE_FILE_HANDLE
		top->tool.cgroup_events = true;
#else
		pr_err("cgroup tracking is not supported.\n");
		return -1;
#endif
	}

	ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
						&top->session->machines.host,
						&top->record_opts);
	if (ret < 0)
		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");

	ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
					     &top->session->machines.host);
	if (ret < 0)
		pr_debug("Couldn't synthesize cgroup events.\n");

	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->core.threads, false,
				    top->nr_threads_synthesize);

	if (top->nr_threads_synthesize > 1)
		perf_set_singlethreaded();

	if (perf_hpp_list.socket) {
		ret = perf_env__read_cpu_topology_map(&perf_env);
		if (ret < 0) {
			char errbuf[BUFSIZ];
			const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));

			ui__error("Could not read the CPU topology map: %s\n", err);
			return ret;
		}
	}

	ret = perf_top__start_counters(top);
	if (ret)
		return ret;

	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);

	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
	if (!target__none(&opts->target))
		evlist__enable(top->evlist);

	ret = -1;
	if (pthread_create(&thread_process, NULL, process_thread, top)) {
		ui__error("Could not create process thread.\n");
		return ret;
	}

	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_join_thread;
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
			goto out_join;
		}
	}

	/* Wait for a minimal set of events before starting the snapshot */
	evlist__poll(top->evlist, 100);

	perf_top__mmap_read(top);

	while (!done) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		if (opts->overwrite || (hits == top->samples))
			ret = evlist__poll(top->evlist, 100);

		if (resize) {
			perf_top__resize(top);
			resize = 0;
		}
	}

	ret = 0;
out_join:
	pthread_join(thread, NULL);
out_join_thread:
	pthread_cond_signal(&top->qe.cond);
	pthread_join(thread_process, NULL);
	return ret;
}

static int
callchain_opt(const struct option *opt, const char *arg, int unset)
{
	symbol_conf.use_callchain = true;
	return record_callchain_opt(opt, arg, unset);
}

static int
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
{
	struct callchain_param *callchain = opt->value;

	callchain->enabled = !unset;
	callchain->record_mode = CALLCHAIN_FP;

	/* --no-call-graph */
	if (unset) {
		symbol_conf.use_callchain = false;
		callchain->record_mode = CALLCHAIN_NONE;
		return 0;
	}

	return parse_callchain_top_opt(arg);
}

static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
{
	if (!strcmp(var, "top.call-graph")) {
		var = "call-graph.record-mode";
		return perf_default_config(var, value, cb);
	}
	if (!strcmp(var, "top.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
		return 0;
	}

	return 0;
}

static int
parse_percent_limit(const struct option *opt, const char *arg,
		    int unset __maybe_unused)
{
	struct perf_top *top = opt->value;

	top->min_percent = strtof(arg, NULL);
	return 0;
}

const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";

int cmd_top(int argc, const char **argv)
{
	char errbuf[BUFSIZ];
	struct perf_top top = {
		.record_opts = {
			.mmap_pages	= UINT_MAX,
			.user_freq	= UINT_MAX,
			.user_interval	= ULLONG_MAX,
			.freq		= 4000, /* 4 KHz */
			.target		= {
				.uses_mmap   = true,
			},
			/*
			 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
			 * when we pause, fix that and reenable. Probably using a
			 * separate evlist with a dummy event, i.e. a non-overwrite
			 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
			 * stays in overwrite mode. -acme
			 */
			.overwrite	= 0,
			.sample_time	= true,
			.sample_time_set = true,
		},
		.max_stack	     = sysctl__max_stack(),
		.annotation_opts     = annotation__default_options,
		.nr_threads_synthesize = UINT_MAX,
	};
	struct record_opts *opts = &top.record_opts;
	struct target *target = &opts->target;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
		    "profile events on existing process id"),
	OPT_STRING('t', "tid", &target->tid, "tid",
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN(0, "group", &opts->group,
			    "put the counters into a counter group"),
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
		     "profile at this frequency",
		      record__parse_freq),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
		   " Please refer to the man page for the complete list."),
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		   "output field(s): overhead, period, sample plus all of sort keys"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording and display",
			   &callchain_opt),
	OPT_CALLBACK(0, "call-graph", &callchain_param,
		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
		     top_callchain_help, &parse_callchain_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well"),
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		    "Set the maximum stack depth when parsing the callchain. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		   "ignore callees of these functions in call graphs",
		   report_parse_ignore_callees_opt),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
	OPT_STRING(0, "objdump", &top.annotation_opts.objdump_path, "path",
		    "objdump binary to use for disassembly and annotations"),
	OPT_STRING('M', "disassembler-style", &top.annotation_opts.disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
		    "Add prefix to source file path names in programs (with --prefix-strip)"),
	OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
		    "Strip first N entries of source file path name in programs (with --prefix)"),
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "How to display percentage of filtered entries", parse_filter_percentage),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
		    "Use a backward ring buffer, default: no"),
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
			"number of threads to run event synthesize"),
	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
		    "Record cgroup events"),
	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
		    "Sort the output by the event at the index n in group. "
		    "If n is invalid, sort by the first event. "
		    "WARNING: should be used on grouped events."),
	OPTS_EVSWITCH(&top.evswitch),
	OPT_END()
	};
	struct evlist *sb_evlist = NULL;
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};
	int status = hists__init();

	if (status < 0)
		return status;

	top.annotation_opts.min_pcnt = 5;
	top.annotation_opts.context  = 4;

	top.evlist = evlist__new();
	if (top.evlist == NULL)
		return -ENOMEM;

	status = perf_config(perf_top_config, &top);
	if (status)
		return status;
	/*
	 * Since the per arch annotation init routine may need the cpuid, read
	 * it here, since we are not getting this from the perf.data header.
	 */
	status = perf_env__read_cpuid(&perf_env);
	if (status) {
		/*
		 * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
		 * warn the user explicitly.
		 */
		eprintf(status == ENOSYS ? 1 : 0, verbose,
			"Couldn't read the cpuid for this machine: %s\n",
			str_error_r(errno, errbuf, sizeof(errbuf)));
	}
	top.evlist->env = &perf_env;

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	if (annotate_check_args(&top.annotation_opts) < 0)
		goto out_delete_evlist;

	if (!top.evlist->core.nr_entries &&
	    perf_evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_delete_evlist;
	}

	status = evswitch__init(&top.evswitch, top.evlist, stderr);
	if (status)
		goto out_delete_evlist;

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
		}
	}

	if (opts->branch_stack && callchain_param.enabled)
		symbol_conf.show_branchflag_count = true;

	sort__mode = SORT_MODE__TOP;
	/* display thread wants entries to be collapsed in a different tree */
	perf_hpp_list.need_collapse = 1;

	if (top.use_stdio)
		use_browser = 0;
	else if (top.use_tui)
		use_browser = 1;

	setup_browser(false);

	if (setup_sorting(top.evlist) < 0) {
		if (sort_order)
			parse_options_usage(top_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : top_usage,
					    options, "fields", 0);
		goto out_delete_evlist;
	}

	status = target__validate(target);
	if (status) {
		target__strerror(target, status, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	status = target__parse_uid(target);
	if (status) {
		int saved_errno = errno;

		target__strerror(target, status, errbuf, BUFSIZ);
		ui__error("%s\n", errbuf);

		status = -saved_errno;
		goto out_delete_evlist;
	}

	if (target__none(target))
		target->system_wide = true;

	if (perf_evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out_delete_evlist;
	}

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	if (record_opts__config(opts)) {
		status = -EINVAL;
		goto out_delete_evlist;
	}

	top.sym_evsel = evlist__first(top.evlist);

	if (!callchain_param.enabled) {
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate();
	}

	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	status = symbol__annotation_init();
	if (status < 0)
		goto out_delete_evlist;

	annotation_config__init(&top.annotation_opts);

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	status = symbol__init(NULL);
	if (status < 0)
		goto out_delete_evlist;

	sort__setup_elide(stdout);

	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		perf_top__update_print_entries(&top);
		signal(SIGWINCH, winch_sig);
	}

	top.session = perf_session__new(NULL, false, NULL);
	if (IS_ERR(top.session)) {
		status = PTR_ERR(top.session);
		goto out_delete_evlist;
	}

	if (!top.record_opts.no_bpf_event)
		bpf_event__add_sb_event(&sb_evlist, &perf_env);

	if (perf_evlist__start_sb_thread(sb_evlist, target)) {
		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
		opts->no_bpf_event = true;
	}

	status = __cmd_top(&top);

	if (!opts->no_bpf_event)
		perf_evlist__stop_sb_thread(sb_evlist);

out_delete_evlist:
	evlist__delete(top.evlist);
	perf_session__delete(top.session);

	return status;
}