]> git.ipfire.org Git - thirdparty/kernel/linux.git/blame - tools/perf/builtin-top.c
Merge tag 'net-6.16-rc5' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
[thirdparty/kernel/linux.git] / tools / perf / builtin-top.c
CommitLineData
91007045 1// SPDX-License-Identifier: GPL-2.0-only
07800601 2/*
bf9e1876
IM
3 * builtin-top.c
4 *
5 * Builtin top command: Display a continuously updated profile of
6 * any workload, CPU or specific PID.
7 *
8 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
ab81f3fd 9 * 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
bf9e1876
IM
10 *
11 * Improvements and fixes by:
12 *
13 * Arjan van de Ven <arjan@linux.intel.com>
14 * Yanmin Zhang <yanmin.zhang@intel.com>
15 * Wu Fengguang <fengguang.wu@intel.com>
16 * Mike Galbraith <efault@gmx.de>
17 * Paul Mackerras <paulus@samba.org>
07800601 18 */
bf9e1876 19#include "builtin.h"
07800601 20
1a482f38 21#include "perf.h"
bf9e1876 22
36532461 23#include "util/annotate.h"
a40b95bc 24#include "util/bpf-event.h"
51f38242 25#include "util/cgroup.h"
41840d21 26#include "util/config.h"
8fc0321f 27#include "util/color.h"
4a3cec84 28#include "util/dso.h"
361c99a6 29#include "util/evlist.h"
69aad6f1 30#include "util/evsel.h"
95be9d19 31#include "util/evsel_config.h"
5ab8c689 32#include "util/event.h"
b0a7d1a0 33#include "util/machine.h"
1101f69a 34#include "util/map.h"
e0fcfb08 35#include "util/mmap.h"
b3165f41 36#include "util/session.h"
13e0c844 37#include "util/thread.h"
7d45f402 38#include "util/stat.h"
b3165f41 39#include "util/symbol.h"
ea49e01c 40#include "util/synthetic-events.h"
8c3e10eb 41#include "util/top.h"
2da39f1c 42#include "util/util.h"
43cbcd8a 43#include <linux/rbtree.h>
4b6ab94e 44#include <subcmd/parse-options.h>
b456bae0 45#include "util/parse-events.h"
5c9dbe6d 46#include "util/callchain.h"
a12b51c4 47#include "util/cpumap.h"
ab81f3fd 48#include "util/sort.h"
6a9fa4e3 49#include "util/string2.h"
b0742e90 50#include "util/term.h"
6b118e92 51#include "util/intlist.h"
a18b027e 52#include "util/parse-branch-options.h"
0d3942db 53#include "arch/common.h"
8520a98d 54#include "ui/ui.h"
07800601 55
8f28827a 56#include "util/debug.h"
16c66bc1 57#include "util/ordered-events.h"
70943490 58#include "util/pfm.h"
8f28827a 59
07800601 60#include <assert.h>
31d68e7b 61#include <elf.h>
07800601 62#include <fcntl.h>
0e9b20b8 63
07800601 64#include <stdio.h>
923c42c1
MG
65#include <termios.h>
66#include <unistd.h>
9486aa38 67#include <inttypes.h>
0e9b20b8 68
07800601 69#include <errno.h>
07800601
IM
70#include <time.h>
71#include <sched.h>
9607ad3a 72#include <signal.h>
07800601
IM
73
74#include <sys/syscall.h>
75#include <sys/ioctl.h>
a8fa4960 76#include <poll.h>
07800601
IM
77#include <sys/prctl.h>
78#include <sys/wait.h>
79#include <sys/uio.h>
31d68e7b 80#include <sys/utsname.h>
07800601
IM
81#include <sys/mman.h>
82
531d2410 83#include <linux/stringify.h>
b9c4b0f4 84#include <linux/time64.h>
07800601 85#include <linux/types.h>
6ef81c55 86#include <linux/err.h>
07800601 87
3052ba56 88#include <linux/ctype.h>
7728fa0c 89#include <perf/mmap.h>
3d689ed6 90
69176896
IR
/* Set to request shutdown of the display/processing loops (see stop_top()). */
static volatile sig_atomic_t done;
/* Set by the SIGWINCH handler; consumed where the terminal is re-measured. */
static volatile sig_atomic_t resize;
11859e82 93
933cbb1c
NK
/* Number of screen rows consumed by the header before the symbol table. */
#define HEADER_LINE_NR  5

/* Recompute how many hist entries fit below the header on the current terminal. */
static void perf_top__update_print_entries(struct perf_top *top)
{
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
}
100
/*
 * SIGWINCH handler: only sets a flag; the actual resize work is done
 * outside signal context (see perf_top__resize()).
 */
static void winch_sig(int sig __maybe_unused)
{
	resize = 1;
}
1758af10 105
b135e5ee
JO
/* Re-read the terminal dimensions and adjust the visible entry count. */
static void perf_top__resize(struct perf_top *top)
{
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
}
111
/*
 * Prepare annotation (disassembly + per-address histograms) for the symbol
 * of @he and make it the current "details" entry (top->sym_filter_entry).
 *
 * Returns 0 on success, negative on error (also printed to the user).
 * Takes/releases the symbol's annotation lock around histogram setup.
 */
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
{
	struct evsel *evsel;
	struct symbol *sym;
	struct annotation *notes;
	struct map *map;
	struct dso *dso;
	int err = -1;

	if (!he || !he->ms.sym)
		return -1;

	evsel = hists_to_evsel(he->hists);

	sym = he->ms.sym;
	map = he->ms.map;
	dso = map__dso(map);

	/*
	 * We can't annotate with just /proc/kallsyms
	 */
	if (dso__symtab_type(dso) == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
		       "path\n", sym->name);
		sleep(1);
		return -1;
	}

	notes = symbol__annotation(sym);
	annotation__lock(notes);

	/* Allocate one histogram per event in the evlist. */
	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
		annotation__unlock(notes);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
		       sym->name);
		sleep(1);
		return err;
	}

	err = symbol__annotate(&he->ms, evsel, NULL);
	if (err == 0) {
		top->sym_filter_entry = he;
	} else {
		char msg[BUFSIZ];
		symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	}

	annotation__unlock(notes);
	return err;
}
163
ab81f3fd 164static void __zero_source_counters(struct hist_entry *he)
923c42c1 165{
ab81f3fd 166 struct symbol *sym = he->ms.sym;
36532461 167 symbol__annotate_zero_histograms(sym);
923c42c1
MG
168}
169
31d68e7b
ACM
/*
 * Warn (once per map, see map__set_erange_warned()) that a sample address
 * fell outside the symbol/map bounds, including enough environment detail
 * (arch, kernel, tool version) for a useful bug report.
 */
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
{
	struct utsname uts;
	int err = uname(&uts);
	struct dso *dso = map__dso(map);

	ui__warning("Out of bounds address found:\n\n"
		    "Addr: %" PRIx64 "\n"
		    "DSO: %s %c\n"
		    "Map: %" PRIx64 "-%" PRIx64 "\n"
		    "Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		    "Arch: %s\n"
		    "Kernel: %s\n"
		    "Tools: %s\n\n"
		    "Not all samples will be on the annotation output.\n\n"
		    "Please report to linux-kernel@vger.kernel.org\n",
		    ip, dso__long_name(dso), dso__symtab_origin(dso),
		    map__start(map), map__end(map), sym->start, sym->end,
		    sym->binding == STB_GLOBAL ? 'g' :
		    sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
		    err ? "[unknown]" : uts.machine,
		    err ? "[unknown]" : uts.release, perf_version_string);
	/* In stdio mode give the user time to read the warning. */
	if (use_browser <= 0)
		sleep(5);

	map__set_erange_warned(map);
}
197
1758af10
ACM
/*
 * Account a precise sample IP into the symbol's annotation histogram.
 * Only done when annotation output is actually visible (TUI, or the stdio
 * "details" view of this very symbol). Caller holds he->hists->lock; on
 * error paths that may sleep, the lock is dropped and re-taken.
 */
static void perf_top__record_precise_ip(struct perf_top *top,
					struct hist_entry *he,
					struct perf_sample *sample,
					struct evsel *evsel, u64 ip)
	EXCLUSIVE_LOCKS_REQUIRED(he->hists->lock)
{
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	int err = 0;

	if (sym == NULL || (use_browser == 0 &&
			    (top->sym_filter_entry == NULL ||
			     top->sym_filter_entry->ms.sym != sym)))
		return;

	notes = symbol__annotation(sym);

	/* Skip the sample rather than block the fast path on contention. */
	if (!annotation__trylock(notes))
		return;

	err = hist_entry__inc_addr_samples(he, sample, evsel, ip);

	annotation__unlock(notes);

	if (unlikely(err)) {
		/*
		 * This function is now called with he->hists->lock held.
		 * Release it before going to sleep.
		 */
		mutex_unlock(&he->hists->lock);

		if (err == -ERANGE && !map__erange_warned(he->ms.map))
			ui__warn_map_erange(he->ms.map, sym, ip);
		else if (err == -ENOMEM) {
			pr_err("Not enough memory for annotating '%s' symbol!\n",
			       sym->name);
			sleep(1);
		}

		mutex_lock(&he->hists->lock);
	}
}
240
/*
 * stdio-mode "details" view: print the annotated source/asm of the currently
 * selected symbol (top->sym_filter_entry), then zero or decay its histograms
 * so the next refresh shows fresh data.
 */
static void perf_top__show_details(struct perf_top *top)
{
	struct hist_entry *he = top->sym_filter_entry;
	struct evsel *evsel;
	struct annotation *notes;
	struct symbol *symbol;
	int more;

	if (!he)
		return;

	evsel = hists_to_evsel(he->hists);

	symbol = he->ms.sym;
	notes = symbol__annotation(symbol);

	annotation__lock(notes);

	symbol__calc_percent(symbol, evsel);

	/* No disassembly available yet (annotation not parsed). */
	if (notes->src == NULL)
		goto out_unlock;

	printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", annotate_opts.min_pcnt);

	more = hist_entry__annotate_printf(he, top->sym_evsel);

	if (top->evlist->enabled) {
		if (top->zero)
			symbol__annotate_zero_histogram(symbol, top->sym_evsel);
		else
			symbol__annotate_decay_histogram(symbol, top->sym_evsel);
	}
	if (more != 0)
		printf("%d lines not displayed, maybe increase display entries [e]\n", more);
out_unlock:
	annotation__unlock(notes);
}
07800601 280
/*
 * Re-sort the hists of every event for display: decay/delete stale entries,
 * collapse, link group members to their leader, then output-resort.
 */
static void perf_top__resort_hists(struct perf_top *t)
{
	struct evlist *evlist = t->evlist;
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		/*
		 * unlink existing entries so that they can be linked
		 * in a correct order in hists__match() below.
		 */
		hists__unlink(hists);

		if (evlist->enabled) {
			if (t->zero) {
				hists__delete_entries(hists);
			} else {
				hists__decay_entries(hists, t->hide_user_symbols,
						     t->hide_kernel_symbols);
			}
		}

		hists__collapse_resort(hists, NULL);

		/* Non-group events are considered as leader */
		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(evsel__leader(pos));

			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
		}
	}

	evlist__for_each_entry(evlist, pos) {
		evsel__output_resort(pos, NULL);
	}
}
319
/*
 * stdio-mode refresh: clear the screen, print the header, warn about lost
 * chunks, then either show the symbol "details" view or the sorted table.
 */
static void perf_top__print_sym_table(struct perf_top *top)
{
	char bf[160];
	int printed = 0;
	const int win_width = top->winsize.ws_col - 1;
	struct evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);

	puts(CONSOLE_CLEAR);

	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%s\n", bf);

	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);

	/* Warn once per change in the PERF_RECORD_LOST count. */
	if (!top->record_opts.overwrite &&
	    (top->evlist->stats.nr_lost_warned !=
	     top->evlist->stats.nr_events[PERF_RECORD_LOST])) {
		top->evlist->stats.nr_lost_warned =
			top->evlist->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			      "WARNING: LOST %d chunks, Check IO/CPU overload",
			      top->evlist->stats.nr_lost_warned);
		++printed;
	}

	/* Details view replaces the table entirely. */
	if (top->sym_filter_entry) {
		perf_top__show_details(top);
		return;
	}

	perf_top__resort_hists(top);

	hists__output_recalc_col_len(hists, top->print_entries - printed);
	putchar('\n');
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		       top->min_percent, stdout, !symbol_conf.use_callchain);
}
358
923c42c1
MG
/*
 * Prompt the user with @msg and read a non-negative decimal integer from
 * stdin into *target. On bad input or read failure, *target is unchanged.
 *
 * Fix: getline() may allocate the line buffer even when it returns -1
 * (e.g. on EOF), so route the failure path through out_free instead of
 * returning directly, which leaked @buf.
 */
static void prompt_integer(int *target, const char *msg)
{
	char *buf = NULL, *p;
	size_t dummy = 0;
	int tmp;

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	/* Reject anything that is not a plain run of digits. */
	for (p = buf; *p; p++) {
		if (!isdigit(*p))
			goto out_free;
	}

	tmp = strtoul(buf, NULL, 10);
	*target = tmp;
out_free:
	free(buf);
}
384
/* Like prompt_integer(), but only accept values in the range [0, 100]. */
static void prompt_percent(int *target, const char *msg)
{
	int val = 0;

	prompt_integer(&val, msg);
	if (val < 0 || val > 100)
		return;
	*target = val;
}
393
/*
 * Ask the user for a symbol name and, if it is present in the current
 * hists, make it the "details" entry via perf_top__parse_source().
 * Any previously selected symbol has its counters zeroed and is dropped.
 */
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
{
	char *buf = NULL, *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	size_t dummy = 0;

	/* zero counters of active symbol */
	if (syme) {
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	}

	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
		goto out_free;

	p = strchr(buf, '\n');
	if (p)
		*p = 0;

	/* Linear scan of the sorted entries for an exact name match. */
	next = rb_first_cached(&hists->entries);
	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
			found = n;
			break;
		}
		next = rb_next(&n->rb_node);
	}

	if (!found) {
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		sleep(1);
	} else
		perf_top__parse_source(top, found);

out_free:
	free(buf);
}
435
/* Print the interactive key bindings menu, with each option's current value. */
static void perf_top__print_mapped_keys(struct perf_top *top)
{
	char *name = NULL;

	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
		name = sym->name;
	}

	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);

	/* Event switching only makes sense with more than one event. */
	if (top->evlist->core.nr_entries > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel));

	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);

	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", annotate_opts.min_pcnt);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");

	fprintf(stdout,
		"\t[K] hide kernel symbols. \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
	fprintf(stdout,
		"\t[U] hide user symbols. \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
}
467
1758af10 468static int perf_top__key_mapped(struct perf_top *top, int c)
091bd2e9
MG
469{
470 switch (c) {
471 case 'd':
472 case 'e':
473 case 'f':
474 case 'z':
475 case 'q':
476 case 'Q':
8ffcda17
ACM
477 case 'K':
478 case 'U':
6cff0e8d
KS
479 case 'F':
480 case 's':
481 case 'S':
091bd2e9
MG
482 return 1;
483 case 'E':
6484d2f9 484 return top->evlist->core.nr_entries > 1 ? 1 : 0;
83a0944f
IM
485 default:
486 break;
091bd2e9
MG
487 }
488
489 return 0;
923c42c1
MG
490}
491
/*
 * Dispatch one interactive keypress in stdio mode. If @c is unmapped,
 * show the key menu and read another key (in quiet terminal mode).
 * Returns true to keep running, false when the user asked to quit.
 */
static bool perf_top__handle_keypress(struct perf_top *top, int c)
{
	bool ret = true;

	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		struct termios save;

		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		fflush(stdout);

		set_term_quiet_input(&save);

		poll(&stdin_poll, 1, -1);
		c = getc(stdin);

		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
			return ret;
	}

	switch (c) {
	case 'd':
		prompt_integer(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
			top->delay_secs = 1;
		break;
	case 'e':
		prompt_integer(&top->print_entries, "Enter display entries (lines)");
		/* 0 means "auto": track the window size via SIGWINCH. */
		if (top->print_entries == 0) {
			perf_top__resize(top);
			signal(SIGWINCH, winch_sig);
		} else {
			signal(SIGWINCH, SIG_DFL);
		}
		break;
	case 'E':
		if (top->evlist->core.nr_entries > 1) {
			/* Select 0 as the default event: */
			int counter = 0;

			fprintf(stderr, "\nAvailable events:");

			evlist__for_each_entry(top->evlist, top->sym_evsel)
				fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel));

			prompt_integer(&counter, "Enter details event counter");

			if (counter >= top->evlist->core.nr_entries) {
				top->sym_evsel = evlist__first(top->evlist);
				fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
				sleep(1);
				break;
			}
			evlist__for_each_entry(top->evlist, top->sym_evsel)
				if (top->sym_evsel->core.idx == counter)
					break;
		} else
			top->sym_evsel = evlist__first(top->evlist);
		break;
	case 'f':
		prompt_integer(&top->count_filter, "Enter display event count filter");
		break;
	case 'F':
		prompt_percent(&annotate_opts.min_pcnt,
			       "Enter details display event filter (percent)");
		break;
	case 'K':
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		break;
	case 'q':
	case 'Q':
		printf("exiting.\n");
		if (top->dump_symtab)
			perf_session__fprintf_dsos(top->session, stderr);
		ret = false;
		break;
	case 's':
		perf_top__prompt_symbol(top, "Enter details symbol");
		break;
	case 'S':
		/* Stop annotating: drop the details entry, zero its counters. */
		if (!top->sym_filter_entry)
			break;
		else {
			struct hist_entry *syme = top->sym_filter_entry;

			top->sym_filter_entry = NULL;
			__zero_source_counters(syme);
		}
		break;
	case 'U':
		top->hide_user_symbols = !top->hide_user_symbols;
		break;
	case 'z':
		top->zero = !top->zero;
		break;
	default:
		break;
	}

	return ret;
}
595
ab81f3fd
ACM
/*
 * TUI timer callback: adopt the event currently selected in the browser,
 * re-sort the hists, and warn if the reader is falling behind the ring
 * buffer (lost/dropped events).
 */
static void perf_top__sort_new_samples(void *arg)
{
	struct perf_top *t = arg;

	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;

	perf_top__resort_hists(t);

	if (t->lost || t->drop)
		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C)\n");
}
608
c94cef4b
JO
/* Request shutdown of both the session processing and the display loops. */
static void stop_top(void)
{
	session_done = 1;
	done = 1;
}
614
/*
 * TUI display thread: runs the hists browser with a periodic refresh
 * callback; loops on K_RELOAD (restart with zeroed counts), otherwise
 * stops the tool when the browser exits.
 */
static void *display_thread_tui(void *arg)
{
	struct evsel *pos;
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer = perf_top__sort_new_samples,
		.arg = top,
		.refresh = top->delay_secs,
	};
	int ret;

	/* In order to read symbols from other namespaces perf to needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);

repeat:
	perf_top__sort_new_samples(top);

	/*
	 * Initialize the uid_filter_str, in the future the TUI will allow
	 * Zooming in/out UIDs. For now just use whatever the user passed
	 * via --uid.
	 */
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	}

	ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
				       &top->session->header.env, !top->record_opts.overwrite);
	if (ret == K_RELOAD) {
		top->zero = true;
		goto repeat;
	} else
		stop_top();

	return NULL;
}
659
4a1a9971
JO
/* Termination-signal handler for the display thread: request shutdown. */
static void display_sig(int sig __maybe_unused)
{
	stop_top();
}
664
/* Install handlers: stack dump on crashes, clean shutdown on INT/QUIT/TERM. */
static void display_setup_sig(void)
{
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT, display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
}
673
/*
 * stdio display thread: redraw the symbol table every delay interval,
 * waking early on stdin activity (keypress) or SIGWINCH (EINTR). Keypresses
 * are dispatched to perf_top__handle_keypress(); terminal attributes are
 * restored before exit.
 */
static void *display_thread(void *arg)
{
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct termios save;
	struct perf_top *top = arg;
	int delay_msecs, c;

	/* In order to read symbols from other namespaces perf to needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	 */
	unshare(CLONE_FS);

	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);

	display_setup_sig();
	pthread__unblock_sigwinch();
repeat:
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
	set_term_quiet_input(&save);
	/* trash return*/
	clearerr(stdin);
	if (poll(&stdin_poll, 1, 0) > 0)
		getc(stdin);

	while (!done) {
		perf_top__print_sym_table(top);
		/*
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		 */
		switch (poll(&stdin_poll, 1, delay_msecs)) {
		case 0:
			continue;
		case -1:
			if (errno == EINTR)
				continue;
			fallthrough;
		default:
			c = getc(stdin);
			tcsetattr(0, TCSAFLUSH, &save);

			if (perf_top__handle_keypress(top, c))
				goto repeat;
			stop_top();
		}
	}

	tcsetattr(0, TCSAFLUSH, &save);
	return NULL;
}
726
7c50391f
NK
/*
 * Per-entry callback of the hist_entry iterator: feed precise IPs into
 * annotation (only for the single/leaf entry when symbols are displayed)
 * and account branch-stack cycles. Called with he->hists->lock held.
 */
static int hist_iter__top_callback(struct hist_entry_iter *iter,
				   struct addr_location *al, bool single,
				   void *arg)
	EXCLUSIVE_LOCKS_REQUIRED(iter->he->hists->lock)
{
	struct perf_top *top = arg;
	struct evsel *evsel = iter->evsel;

	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);

	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
			     !(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
			     NULL, evsel);
	return 0;
}
743
/*
 * Process one PERF_RECORD_SAMPLE: resolve the address, emit one-time
 * warnings for unresolvable situations (guest kernels, kptr_restrict,
 * missing/invalid vmlinux), then add the sample to the event's hists
 * under hists->lock.
 */
static void perf_event__process_sample(const struct perf_tool *tool,
				       const union perf_event *event,
				       struct evsel *evsel,
				       struct perf_sample *sample,
				       struct machine *machine)
{
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;

	/* Guest sample whose machine we don't know: warn once per guest pid. */
	if (!machine && perf_guest) {
		static struct intlist *seen;

		if (!seen)
			seen = intlist__new(NULL);

		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
				sample->pid);
			intlist__add(seen, sample->pid);
		}
		return;
	}

	if (!machine) {
		pr_err("%u unprocessable samples recorded.\r",
		       top->session->evlist->stats.nr_unprocessable_samples++);
		return;
	}

	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;

	addr_location__init(&al);
	if (machine__resolve(machine, &al, sample) < 0)
		goto out;

	if (top->stitch_lbr)
		thread__set_lbr_stitch_enable(al.thread, true);

	/* One-time warning when kptr_restrict will hide kernel symbols. */
	if (!machine->kptr_restrict_warned &&
	    symbol_conf.kptr_restrict &&
	    al.cpumode == PERF_RECORD_MISC_KERNEL) {
		if (!evlist__exclude_kernel(top->session->evlist)) {
			ui__warning(
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
"Kernel%s samples will not be resolved.\n",
				  al.map && map__has_symbols(al.map) ?
				  " modules" : "");
			if (use_browser <= 0)
				sleep(5);
		}
		machine->kptr_restrict_warned = true;
	}

	if (al.sym == NULL && al.map != NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		/*
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 *
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		 */
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
		    __map__is_kernel(al.map) && !map__has_symbols(al.map)) {
			if (symbol_conf.vmlinux_name) {
				char serr[256];

				dso__strerror_load(map__dso(al.map), serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					    symbol_conf.vmlinux_name, serr, msg);
			} else {
				ui__warning("A vmlinux file was not found.\n%s",
					    msg);
			}

			if (use_browser <= 0)
				sleep(5);
			top->vmlinux_warned = true;
		}
	}

	/* Skip samples in symbols marked idle (e.g. cpu_idle). */
	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.evsel = evsel,
			.sample = sample,
			.add_entry_cb = hist_iter__top_callback,
		};

		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
		else
			iter.ops = &hist_iter_normal;

		mutex_lock(&hists->lock);

		if (hist_entry_iter__add(&iter, &al, top->max_stack, top) < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");

		mutex_unlock(&hists->lock);
	}

out:
	addr_location__exit(&al);
}
855
d24e3c98
JO
856static void
857perf_top__process_lost(struct perf_top *top, union perf_event *event,
32dcd021 858 struct evsel *evsel)
d24e3c98 859{
d24e3c98
JO
860 top->lost += event->lost.lost;
861 top->lost_total += event->lost.lost;
bf8f8587 862 evsel->evlist->stats.total_lost += event->lost.lost;
d24e3c98
JO
863}
864
865static void
866perf_top__process_lost_samples(struct perf_top *top,
867 union perf_event *event,
32dcd021 868 struct evsel *evsel)
d24e3c98 869{
d24e3c98
JO
870 top->lost += event->lost_samples.lost;
871 top->lost_total += event->lost_samples.lost;
bf8f8587 872 evsel->evlist->stats.total_lost_samples += event->lost_samples.lost;
d24e3c98
JO
873}
874
d63b9f6f
JO
/* Timestamp of the most recently parsed event, used to queue in order. */
static u64 last_timestamp;

/*
 * Drain one mmap ring buffer: parse each event's timestamp and hand it to
 * the ordered-events queue; honor a pending queue-rotation request from
 * the processing thread.
 */
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct mmap *md;
	union perf_event *event;

	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
	if (perf_mmap__read_init(&md->core) < 0)
		return;

	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		int ret;

		ret = evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
		/* -1 means "no timestamp", which is still queueable. */
		if (ret && ret != -1)
			break;

		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0, NULL);
		if (ret)
			break;

		perf_mmap__consume(&md->core);

		if (top->qe.rotate) {
			mutex_lock(&top->qe.mutex);
			top->qe.rotate = false;
			cond_signal(&top->qe.cond);
			mutex_unlock(&top->qe.mutex);
		}
	}

	perf_mmap__read_done(&md->core);
}
911
/*
 * Drain all mmap ring buffers. In overwrite mode the backward maps must be
 * paused (DATA_PENDING) while reading and resumed (EMPTY -> RUNNING) after.
 */
static void perf_top__mmap_read(struct perf_top *top)
{
	bool overwrite = top->record_opts.overwrite;
	struct evlist *evlist = top->evlist;
	int i;

	if (overwrite)
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);

	for (i = 0; i < top->evlist->core.nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);

	if (overwrite) {
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
	}
}
929
63878a53
KL
/*
 * Check per-event overwrite term.
 * perf top should support consistent term for all events.
 * - All events don't have per-event term
 *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
 *   Nothing change, return 0.
 * - All events have same per-event term
 *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/
 *   Using the per-event setting to replace the opts->overwrite if
 *   they are different, then return 0.
 * - Events have different per-event term
 *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
 *   Return -1
 * - Some of the event set per-event term, but some not.
 *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
 *   Return -1
 */
static int perf_top__overwrite_check(struct perf_top *top)
{
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct evsel_config_term *term;
	struct list_head *config_terms;
	struct evsel *evsel;
	int set, overwrite = -1;

	evlist__for_each_entry(evlist, evsel) {
		set = -1;
		config_terms = &evsel->config_terms;
		/* The last OVERWRITE term on an event wins. */
		list_for_each_entry(term, config_terms, list) {
			if (term->type == EVSEL__CONFIG_TERM_OVERWRITE)
				set = term->val.overwrite ? 1 : 0;
		}

		/* no term for current and previous event (likely) */
		if ((overwrite < 0) && (set < 0))
			continue;

		/* has term for both current and previous event, compare */
		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
			return -1;

		/* no term for current event but has term for previous one */
		if ((overwrite >= 0) && (set < 0))
			return -1;

		/* has term for current event */
		if ((overwrite < 0) && (set >= 0)) {
			/* if it's first event, set overwrite */
			if (evsel == evlist__first(evlist))
				overwrite = set;
			else
				return -1;
		}
	}

	/* A consistent per-event setting overrides the command-line default. */
	if ((overwrite >= 0) && (opts->overwrite != overwrite))
		opts->overwrite = overwrite;

	return 0;
}
991
204721d7 992static int perf_top_overwrite_fallback(struct perf_top *top,
32dcd021 993 struct evsel *evsel)
204721d7
KL
994{
995 struct record_opts *opts = &top->record_opts;
63503dba 996 struct evlist *evlist = top->evlist;
32dcd021 997 struct evsel *counter;
204721d7
KL
998
999 if (!opts->overwrite)
1000 return 0;
1001
1002 /* only fall back when first event fails */
515dbe48 1003 if (evsel != evlist__first(evlist))
204721d7
KL
1004 return 0;
1005
1006 evlist__for_each_entry(evlist, counter)
1fc632ce 1007 counter->core.attr.write_backward = false;
204721d7 1008 opts->overwrite = false;
853745f5 1009 pr_debug2("fall back to non-overwrite mode\n");
204721d7
KL
1010 return 1;
1011}
1012
/*
 * Open all counters in the evlist, applying the overwrite-mode fallback
 * and generic evsel fallbacks on failure, then apply filters and mmap
 * the ring buffers. Returns 0 on success, -1 on any unrecoverable error
 * (an error message has already been shown via ui__error/pr_err).
 */
static int perf_top__start_counters(struct perf_top *top)
{
	char msg[BUFSIZ];
	struct evsel *counter;
	struct evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;

	/* All events must agree on overwrite mode before we configure them. */
	if (perf_top__overwrite_check(top)) {
		ui__error("perf top only support consistent per-event "
			  "overwrite setting for all events\n");
		goto out_err;
	}

	evlist__config(evlist, opts, &callchain_param);

	evlist__for_each_entry(evlist, counter) {
try_again:
		if (evsel__open(counter, counter->core.cpus,
				counter->core.threads) < 0) {

			/*
			 * Specially handle overwrite fall back.
			 * Because perf top is the only tool which has
			 * overwrite mode by default, support
			 * both overwrite and non-overwrite mode, and
			 * require consistent mode for all events.
			 *
			 * May move it to generic code with more tools
			 * have similar attribute.
			 */
			if (perf_missing_features.write_backward &&
			    perf_top_overwrite_fallback(top, counter))
				goto try_again;

			/* Generic fallback (e.g. drop precision); retry if it adjusted anything. */
			if (evsel__fallback(counter, &opts->target, errno, msg, sizeof(msg))) {
				if (verbose > 0)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			/* No fallback left: report why the open failed and bail. */
			evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out_err;
		}
	}

	/* 'counter' is set by the helper to the evsel whose filter failed. */
	if (evlist__apply_filters(evlist, &counter, &opts->target)) {
		pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter ?: "BPF", evsel__name(counter), errno,
			str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			    errno, str_error_r(errno, msg, sizeof(msg)));
		goto out_err;
	}

	return 0;

out_err:
	return -1;
}
1077
e3815264 1078static int callchain_param__setup_sample_type(struct callchain_param *callchain)
19d4ac3c 1079{
f2e14cd2 1080 if (callchain->mode != CHAIN_NONE) {
e3815264 1081 if (callchain_register_param(callchain) < 0) {
3780f488 1082 ui__error("Can't register callchain params.\n");
19d4ac3c
ACM
1083 return -EINVAL;
1084 }
1085 }
1086
1087 return 0;
1088}
1089
16c66bc1
JO
1090static struct ordered_events *rotate_queues(struct perf_top *top)
1091{
1092 struct ordered_events *in = top->qe.in;
1093
1094 if (top->qe.in == &top->qe.data[1])
1095 top->qe.in = &top->qe.data[0];
1096 else
1097 top->qe.in = &top->qe.data[1];
1098
1099 return in;
1100}
1101
1102static void *process_thread(void *arg)
1103{
1104 struct perf_top *top = arg;
1105
1106 while (!done) {
1107 struct ordered_events *out, *in = top->qe.in;
1108
1109 if (!in->nr_events) {
1110 usleep(100);
1111 continue;
1112 }
1113
16c66bc1 1114 out = rotate_queues(top);
94ad6e7e 1115
d8e40b58 1116 mutex_lock(&top->qe.mutex);
94ad6e7e 1117 top->qe.rotate = true;
d8e40b58
IR
1118 cond_wait(&top->qe.cond, &top->qe.mutex);
1119 mutex_unlock(&top->qe.mutex);
16c66bc1
JO
1120
1121 if (ordered_events__flush(out, OE_FLUSH__TOP))
1122 pr_err("failed to process events\n");
1123 }
1124
1125 return NULL;
1126}
1127
d63b9f6f
JO
1128/*
1129 * Allow only 'top->delay_secs' seconds behind samples.
1130 */
1131static int should_drop(struct ordered_event *qevent, struct perf_top *top)
1132{
1133 union perf_event *event = qevent->event;
1134 u64 delay_timestamp;
1135
1136 if (event->header.type != PERF_RECORD_SAMPLE)
1137 return false;
1138
1139 delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
1140 return delay_timestamp < last_timestamp;
1141}
1142
16c66bc1
JO
1143static int deliver_event(struct ordered_events *qe,
1144 struct ordered_event *qevent)
1145{
1146 struct perf_top *top = qe->data;
63503dba 1147 struct evlist *evlist = top->evlist;
16c66bc1
JO
1148 struct perf_session *session = top->session;
1149 union perf_event *event = qevent->event;
1150 struct perf_sample sample;
32dcd021 1151 struct evsel *evsel;
16c66bc1
JO
1152 struct machine *machine;
1153 int ret = -1;
1154
97f7e0b3
JO
1155 if (should_drop(qevent, top)) {
1156 top->drop++;
1157 top->drop_total++;
d63b9f6f 1158 return 0;
97f7e0b3 1159 }
d63b9f6f 1160
dc6d2bc2 1161 perf_sample__init(&sample, /*all=*/false);
2a6599cd 1162 ret = evlist__parse_sample(evlist, event, &sample);
16c66bc1
JO
1163 if (ret) {
1164 pr_err("Can't parse sample, err = %d\n", ret);
1165 goto next_event;
1166 }
1167
3ccf8a7b 1168 evsel = evlist__id2evsel(session->evlist, sample.id);
16c66bc1
JO
1169 assert(evsel != NULL);
1170
2f53ae34 1171 if (event->header.type == PERF_RECORD_SAMPLE) {
dc6d2bc2
IR
1172 if (evswitch__discard(&top->evswitch, evsel)) {
1173 ret = 0;
1174 goto next_event;
1175 }
16c66bc1 1176 ++top->samples;
2f53ae34 1177 }
16c66bc1
JO
1178
1179 switch (sample.cpumode) {
1180 case PERF_RECORD_MISC_USER:
1181 ++top->us_samples;
1182 if (top->hide_user_symbols)
1183 goto next_event;
1184 machine = &session->machines.host;
1185 break;
1186 case PERF_RECORD_MISC_KERNEL:
1187 ++top->kernel_samples;
1188 if (top->hide_kernel_symbols)
1189 goto next_event;
1190 machine = &session->machines.host;
1191 break;
1192 case PERF_RECORD_MISC_GUEST_KERNEL:
1193 ++top->guest_kernel_samples;
1194 machine = perf_session__find_machine(session,
1195 sample.pid);
1196 break;
1197 case PERF_RECORD_MISC_GUEST_USER:
1198 ++top->guest_us_samples;
1199 /*
1200 * TODO: we don't process guest user from host side
1201 * except simple counting.
1202 */
1203 goto next_event;
1204 default:
1205 if (event->header.type == PERF_RECORD_SAMPLE)
1206 goto next_event;
1207 machine = &session->machines.host;
1208 break;
1209 }
1210
1211 if (event->header.type == PERF_RECORD_SAMPLE) {
1212 perf_event__process_sample(&top->tool, event, evsel,
1213 &sample, machine);
1214 } else if (event->header.type == PERF_RECORD_LOST) {
1215 perf_top__process_lost(top, event, evsel);
1216 } else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
1217 perf_top__process_lost_samples(top, event, evsel);
1218 } else if (event->header.type < PERF_RECORD_MAX) {
bf8f8587 1219 events_stats__inc(&session->evlist->stats, event->header.type);
16c66bc1
JO
1220 machine__process_event(machine, event, &sample);
1221 } else
1222 ++session->evlist->stats.nr_unknown_events;
1223
1224 ret = 0;
1225next_event:
dc6d2bc2 1226 perf_sample__exit(&sample);
16c66bc1
JO
1227 return ret;
1228}
1229
1230static void init_process_thread(struct perf_top *top)
1231{
1232 ordered_events__init(&top->qe.data[0], deliver_event, top);
1233 ordered_events__init(&top->qe.data[1], deliver_event, top);
1234 ordered_events__set_copy_on_queue(&top->qe.data[0], true);
1235 ordered_events__set_copy_on_queue(&top->qe.data[1], true);
1236 top->qe.in = &top->qe.data[0];
d8e40b58
IR
1237 mutex_init(&top->qe.mutex);
1238 cond_init(&top->qe.cond);
16c66bc1
JO
1239}
1240
cddeeeda
IR
1241static void exit_process_thread(struct perf_top *top)
1242{
1243 ordered_events__free(&top->qe.data[0]);
1244 ordered_events__free(&top->qe.data[1]);
1245 mutex_destroy(&top->qe.mutex);
1246 cond_destroy(&top->qe.cond);
1247}
1248
/*
 * Main body of perf top once options are parsed: synthesize pre-existing
 * state (BPF, cgroups, threads), start the counters, spawn the processing
 * and display threads, then loop reading the mmap'ed ring buffers until
 * 'done'. Returns 0 on success, negative on error.
 */
static int __cmd_top(struct perf_top *top)
{
	struct record_opts *opts = &top->record_opts;
	pthread_t thread, thread_process;
	int ret;

	/* Resolve a cross-environment objdump if none was given on the command line. */
	if (!annotate_opts.objdump_path) {
		ret = perf_env__lookup_objdump(&top->session->header.env,
					       &annotate_opts.objdump_path);
		if (ret)
			return ret;
	}

	ret = callchain_param__setup_sample_type(&callchain_param);
	if (ret)
		return ret;

	/*
	 * NOTE(review): on failure this returns 'ret', which is 0 at this
	 * point — looks like it should return a negative error; confirm
	 * upstream intent.
	 */
	if (perf_session__register_idle_thread(top->session) < 0)
		return ret;

	if (top->nr_threads_synthesize > 1)
		perf_set_multithreaded();

	init_process_thread(top);

	if (opts->record_namespaces)
		top->tool.namespace_events = true;
	if (opts->record_cgroup) {
#ifdef HAVE_FILE_HANDLE
		top->tool.cgroup_events = true;
#else
		pr_err("cgroup tracking is not supported.\n");
		return -1;
#endif
	}

	/* Pre-existing BPF programs won't show up unless synthesized here. */
	ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
						&top->session->machines.host,
						&top->record_opts);
	if (ret < 0)
		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");

	ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
					     &top->session->machines.host);
	if (ret < 0)
		pr_debug("Couldn't synthesize cgroup events.\n");

	machine__synthesize_threads(&top->session->machines.host, &opts->target,
				    top->evlist->core.threads, true, false,
				    top->nr_threads_synthesize);

	perf_set_multithreaded();

	/* Socket sorting needs the CPU topology map. */
	if (perf_hpp_list.socket) {
		ret = perf_env__read_cpu_topology_map(&perf_env);
		if (ret < 0) {
			char errbuf[BUFSIZ];
			const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));

			ui__error("Could not read the CPU topology map: %s\n", err);
			return ret;
		}
	}

	/*
	 * Use global stat_config that is zero meaning aggr_mode is AGGR_NONE
	 * and hybrid_merge is false.
	 */
	evlist__uniquify_evsel_names(top->evlist, &stat_config);
	ret = perf_top__start_counters(top);
	if (ret)
		return ret;

	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);

	/*
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 *
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	 */
	if (!target__none(&opts->target))
		evlist__enable(top->evlist);

	ret = -1;
	if (pthread_create(&thread_process, NULL, process_thread, top)) {
		ui__error("Could not create process thread.\n");
		return ret;
	}

	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
							     display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_join_thread;
	}

	if (top->realtime_prio) {
		struct sched_param param;

		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
			goto out_join;
		}
	}

	/* Wait for a minimal set of events before starting the snapshot */
	evlist__poll(top->evlist, 100);

	perf_top__mmap_read(top);

	while (!done) {
		u64 hits = top->samples;

		perf_top__mmap_read(top);

		/* Only sleep when the last read produced nothing new. */
		if (opts->overwrite || (hits == top->samples))
			ret = evlist__poll(top->evlist, 100);

		if (resize) {
			perf_top__resize(top);
			resize = 0;
		}
	}

	ret = 0;
out_join:
	pthread_join(thread, NULL);
out_join_thread:
	/* Wake the processing thread so it can observe 'done' and exit. */
	cond_signal(&top->qe.cond);
	pthread_join(thread_process, NULL);
	perf_set_singlethreaded();
	exit_process_thread(top);
	return ret;
}
1387
1388static int
ae779a63 1389callchain_opt(const struct option *opt, const char *arg, int unset)
19d4ac3c 1390{
19d4ac3c 1391 symbol_conf.use_callchain = true;
ae779a63
JO
1392 return record_callchain_opt(opt, arg, unset);
1393}
19d4ac3c 1394
ae779a63
JO
1395static int
1396parse_callchain_opt(const struct option *opt, const char *arg, int unset)
1397{
2ddd5c04 1398 struct callchain_param *callchain = opt->value;
a2c10d39 1399
2ddd5c04
ACM
1400 callchain->enabled = !unset;
1401 callchain->record_mode = CALLCHAIN_FP;
a2c10d39
NK
1402
1403 /*
1404 * --no-call-graph
1405 */
1406 if (unset) {
1407 symbol_conf.use_callchain = false;
2ddd5c04 1408 callchain->record_mode = CALLCHAIN_NONE;
a2c10d39
NK
1409 return 0;
1410 }
1411
1412 return parse_callchain_top_opt(arg);
07800601 1413}
b456bae0 1414
b8cbb349 1415static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
eb853e80 1416{
a3a4a3b3
YX
1417 if (!strcmp(var, "top.call-graph")) {
1418 var = "call-graph.record-mode";
1419 return perf_default_config(var, value, cb);
1420 }
104ac991
NK
1421 if (!strcmp(var, "top.children")) {
1422 symbol_conf.cumulate_callchain = perf_config_bool(var, value);
1423 return 0;
1424 }
eb853e80 1425
b8cbb349 1426 return 0;
eb853e80
JO
1427}
1428
fa5df943
NK
1429static int
1430parse_percent_limit(const struct option *opt, const char *arg,
1431 int unset __maybe_unused)
1432{
1433 struct perf_top *top = opt->value;
1434
1435 top->min_percent = strtof(arg, NULL);
1436 return 0;
1437}
1438
76a26549
NK
1439const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
1440 "\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
a2c10d39 1441
/*
 * Entry point for 'perf top': build defaults, parse options, validate the
 * target and sorting configuration, create the session and (optionally)
 * the BPF side-band thread, then run __cmd_top().
 */
int cmd_top(int argc, const char **argv)
{
	char errbuf[BUFSIZ];
	struct perf_top top = {
		.count_filter	     = 5,
		.delay_secs	     = 2,
		.record_opts = {
			.mmap_pages	= UINT_MAX,
			.user_freq	= UINT_MAX,
			.user_interval	= ULLONG_MAX,
			.freq		= 4000, /* 4 KHz */
			.target		= {
				.uses_mmap   = true,
			},
			/*
			 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
			 * when we pause, fix that and reenable. Probably using a
			 * separate evlist with a dummy event, i.e. a non-overwrite
			 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
			 * stays in overwrite mode. -acme
			 * */
			.overwrite	= 0,
			.sample_time	= true,
			.sample_time_set = true,
		},
		.max_stack	     = sysctl__max_stack(),
		.nr_threads_synthesize = UINT_MAX,
	};
	struct parse_events_option_args parse_events_option_args = {
		.evlistp = &top.evlist,
	};
	bool branch_call_mode = false;
	struct record_opts *opts = &top.record_opts;
	struct target *target = &opts->target;
	const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &top.evlist, "filter",
		     "event filter", parse_filter),
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
		    "profile events on existing process id"),
	OPT_STRING('t', "tid", &target->tid, "tid",
		    "profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		   "file", "kallsyms pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		    "hide kernel symbols"),
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		     "number of mmap data pages", evlist__parse_mmap_pages),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		    "number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
			    "dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		    "only display functions with more events than this"),
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		    "symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
		     "profile at this frequency",
		      record__parse_freq),
	OPT_INTEGER('E', "entries", &top.print_entries,
		    "display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		    "hide user symbols"),
#ifdef HAVE_SLANG_SUPPORT
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
#endif
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		   "sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
		   " Please refer the man page for the complete list."),
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		   "output field(s): overhead, period, sample plus all of sort keys"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording and display",
			   &callchain_opt),
	OPT_CALLBACK(0, "call-graph", &callchain_param,
		     "record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
		     top_callchain_help, &parse_callchain_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		    "Accumulate callchains of children and show total overhead as well"),
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		    "Set the maximum stack depth when parsing the callchain. "
		    "Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		   "ignore callees of these functions in call graphs",
		   report_parse_ignore_callees_opt),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		   "only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		   "only consider these symbols"),
	OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		    "objdump binary to use for disassembly and annotations"),
	OPT_STRING(0, "addr2line", &addr2line_path, "path",
		   "addr2line binary to use for line numbers"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
		    "Add prefix to source file path names in programs (with --prefix-strip)"),
	OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
		    "Strip first N entries of source file path name in programs (with --prefix)"),
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		     "How to display percentage of filtered entries", parse_filter_percentage),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		   "width[,width...]",
		   "don't try to adjust column width, use these fixed values"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
		    "add last branch records to call history"),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		    "Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN('H', "hierarchy", &symbol_conf.report_hierarchy,
		    "Show entries in a hierarchy"),
	OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
		    "Use a backward ring buffer, default: no"),
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
			"number of thread to run event synthesize"),
	OPT_CALLBACK('G', "cgroup", &top.evlist, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
		    "Record cgroup events"),
	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
		    "Sort the output by the event at the index n in group. "
		    "If n is invalid, sort by the first event. "
		    "WARNING: should be used on grouped events."),
	OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
		    "Enable LBR callgraph stitching approach"),
#ifdef HAVE_LIBPFM
	OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
		"libpfm4 event selector. use 'perf list' to list available events",
		parse_libpfm_events_option),
#endif
	OPTS_EVSWITCH(&top.evswitch),
	OPT_END()
	};
	const char * const top_usage[] = {
		"perf top [<options>]",
		NULL
	};
	int status = hists__init();

	if (status < 0)
		return status;

	annotation_options__init();

	annotate_opts.min_pcnt = 5;
	annotate_opts.context  = 4;

	top.evlist = evlist__new();
	if (top.evlist == NULL)
		return -ENOMEM;

	status = perf_config(perf_top_config, &top);
	if (status)
		return status;
	/*
	 * Since the per arch annotation init routine may need the cpuid, read
	 * it here, since we are not getting this from the perf.data header.
	 */
	status = perf_env__read_cpuid(&perf_env);
	if (status) {
		/*
		 * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
		 * warn the user explicitly.
		 */
		/* NOTE(review): str_error_r() is given errno here while 'status' holds the error — confirm. */
		eprintf(status == ENOSYS ? 1 : 0, verbose,
			"Couldn't read the cpuid for this machine: %s\n",
			str_error_r(errno, errbuf, sizeof(errbuf)));
	}
	top.evlist->env = &perf_env;

	argc = parse_options(argc, argv, options, top_usage, 0);
	if (argc)
		usage_with_options(top_usage, options);

	/* Copy option strings the annotation/symbol code keeps past this frame. */
	if (disassembler_style) {
		annotate_opts.disassembler_style = strdup(disassembler_style);
		if (!annotate_opts.disassembler_style)
			return -ENOMEM;
	}
	if (objdump_path) {
		annotate_opts.objdump_path = strdup(objdump_path);
		if (!annotate_opts.objdump_path)
			return -ENOMEM;
	}
	if (addr2line_path) {
		symbol_conf.addr2line_path = strdup(addr2line_path);
		if (!symbol_conf.addr2line_path)
			return -ENOMEM;
	}

	status = symbol__validate_sym_arguments();
	if (status)
		goto out_delete_evlist;

	if (annotate_check_args() < 0)
		goto out_delete_evlist;

	/* No -e given: default to cycles, kernel-included only if allowed. */
	if (!top.evlist->core.nr_entries) {
		bool can_profile_kernel = perf_event_paranoid_check(1);
		int err = parse_event(top.evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");

		if (err)
			goto out_delete_evlist;
	}

	status = evswitch__init(&top.evswitch, top.evlist, stderr);
	if (status)
		goto out_delete_evlist;

	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;

		if (field_order) {
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
		}
	}

	if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
		pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
		goto out_delete_evlist;
	}

	if (nr_cgroups > 0 && opts->record_cgroup) {
		pr_err("--cgroup and --all-cgroups cannot be used together\n");
		goto out_delete_evlist;
	}

	/* --branch-history implies branch sampling plus address-keyed callchains. */
	if (branch_call_mode) {
		if (!opts->branch_stack)
			opts->branch_stack = PERF_SAMPLE_BRANCH_ANY;
		symbol_conf.use_callchain = true;
		callchain_param.key = CCKEY_ADDRESS;
		callchain_param.branch_callstack = true;
		callchain_param.enabled = true;
		if (callchain_param.record_mode == CALLCHAIN_NONE)
			callchain_param.record_mode = CALLCHAIN_FP;
		callchain_register_param(&callchain_param);
		if (!sort_order)
			sort_order = "srcline,symbol,dso";
	}

	if (opts->branch_stack && callchain_param.enabled)
		symbol_conf.show_branchflag_count = true;

	sort__mode = SORT_MODE__TOP;
	/* display thread wants entries to be collapsed in a different tree */
	perf_hpp_list.need_collapse = 1;

	if (top.use_stdio)
		use_browser = 0;
#ifdef HAVE_SLANG_SUPPORT
	else if (top.use_tui)
		use_browser = 1;
#endif

	setup_browser(false);

	if (setup_sorting(top.evlist) < 0) {
		if (sort_order)
			parse_options_usage(top_usage, options, "s", 1);
		if (field_order)
			parse_options_usage(sort_order ? NULL : top_usage,
					    options, "fields", 0);
		goto out_delete_evlist;
	}

	/* Target validation only warns; UID parse failure is fatal. */
	status = target__validate(target);
	if (status) {
		target__strerror(target, status, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	}

	status = target__parse_uid(target);
	if (status) {
		int saved_errno = errno;

		target__strerror(target, status, errbuf, BUFSIZ);
		ui__error("%s\n", errbuf);

		status = -saved_errno;
		goto out_delete_evlist;
	}

	if (target__none(target))
		target->system_wide = true;

	if (evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
			  errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
		status = -errno;
		goto out_delete_evlist;
	}

	if (top.delay_secs < 1)
		top.delay_secs = 1;

	if (record_opts__config(opts)) {
		status = -EINVAL;
		goto out_delete_evlist;
	}

	top.sym_evsel = evlist__first(top.evlist);

	if (!callchain_param.enabled) {
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate(top.evlist);
	}

	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;

	status = symbol__annotation_init();
	if (status < 0)
		goto out_delete_evlist;

	annotation_config__init();

	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	status = symbol__init(NULL);
	if (status < 0)
		goto out_delete_evlist;

	sort__setup_elide(stdout);

	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		perf_top__update_print_entries(&top);
		signal(SIGWINCH, winch_sig);
	}

	top.session = perf_session__new(NULL, NULL);
	if (IS_ERR(top.session)) {
		status = PTR_ERR(top.session);
		top.session = NULL;
		goto out_delete_evlist;
	}

	if (!evlist__needs_bpf_sb_event(top.evlist))
		top.record_opts.no_bpf_event = true;

#ifdef HAVE_LIBBPF_SUPPORT
	if (!top.record_opts.no_bpf_event) {
		top.sb_evlist = evlist__new();

		if (top.sb_evlist == NULL) {
			pr_err("Couldn't create side band evlist.\n.");
			status = -EINVAL;
			goto out_delete_evlist;
		}

		if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
			pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
			status = -EINVAL;
			goto out_delete_evlist;
		}
	}
#endif

	if (evlist__start_sb_thread(top.sb_evlist, target)) {
		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
		opts->no_bpf_event = true;
	}

	status = __cmd_top(&top);

	if (!opts->no_bpf_event)
		evlist__stop_sb_thread(top.sb_evlist);

out_delete_evlist:
	evlist__delete(top.evlist);
	perf_session__delete(top.session);
	annotation_options__exit();

	return status;
}