tools/perf/util/machine.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <dirent.h>
3 #include <errno.h>
4 #include <inttypes.h>
5 #include <regex.h>
6 #include <stdlib.h>
7 #include "callchain.h"
8 #include "debug.h"
9 #include "dso.h"
10 #include "env.h"
11 #include "event.h"
12 #include "evsel.h"
13 #include "hist.h"
14 #include "machine.h"
15 #include "map.h"
16 #include "map_symbol.h"
17 #include "branch.h"
18 #include "mem-events.h"
19 #include "srcline.h"
20 #include "symbol.h"
21 #include "sort.h"
22 #include "strlist.h"
23 #include "target.h"
24 #include "thread.h"
25 #include "util.h"
26 #include "vdso.h"
27 #include <stdbool.h>
28 #include <sys/types.h>
29 #include <sys/stat.h>
30 #include <unistd.h>
31 #include "unwind.h"
32 #include "linux/hash.h"
33 #include "asm/bug.h"
34 #include "bpf-event.h"
35 #include <internal/lib.h> // page_size
36 #include "cgroup.h"
37
38 #include <linux/ctype.h>
39 #include <symbol/kallsyms.h>
40 #include <linux/mman.h>
41 #include <linux/string.h>
42 #include <linux/zalloc.h>
43
44 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
45
46 static struct dso *machine__kernel_dso(struct machine *machine)
47 {
48 return machine->vmlinux_map->dso;
49 }
50
51 static void dsos__init(struct dsos *dsos)
52 {
53 INIT_LIST_HEAD(&dsos->head);
54 dsos->root = RB_ROOT;
55 init_rwsem(&dsos->lock);
56 }
57
58 static void machine__threads_init(struct machine *machine)
59 {
60 int i;
61
62 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
63 struct threads *threads = &machine->threads[i];
64 threads->entries = RB_ROOT_CACHED;
65 init_rwsem(&threads->lock);
66 threads->nr = 0;
67 INIT_LIST_HEAD(&threads->dead);
68 threads->last_match = NULL;
69 }
70 }
71
72 static int machine__set_mmap_name(struct machine *machine)
73 {
74 if (machine__is_host(machine))
75 machine->mmap_name = strdup("[kernel.kallsyms]");
76 else if (machine__is_default_guest(machine))
77 machine->mmap_name = strdup("[guest.kernel.kallsyms]");
78 else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
79 machine->pid) < 0)
80 machine->mmap_name = NULL;
81
82 return machine->mmap_name ? 0 : -ENOMEM;
83 }
84
85 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
86 {
87 int err = -ENOMEM;
88
89 memset(machine, 0, sizeof(*machine));
90 maps__init(&machine->kmaps, machine);
91 RB_CLEAR_NODE(&machine->rb_node);
92 dsos__init(&machine->dsos);
93
94 machine__threads_init(machine);
95
96 machine->vdso_info = NULL;
97 machine->env = NULL;
98
99 machine->pid = pid;
100
101 machine->id_hdr_size = 0;
102 machine->kptr_restrict_warned = false;
103 machine->comm_exec = false;
104 machine->kernel_start = 0;
105 machine->vmlinux_map = NULL;
106
107 machine->root_dir = strdup(root_dir);
108 if (machine->root_dir == NULL)
109 return -ENOMEM;
110
111 if (machine__set_mmap_name(machine))
112 goto out;
113
114 if (pid != HOST_KERNEL_ID) {
115 struct thread *thread = machine__findnew_thread(machine, -1,
116 pid);
117 char comm[64];
118
119 if (thread == NULL)
120 goto out;
121
122 snprintf(comm, sizeof(comm), "[guest/%d]", pid);
123 thread__set_comm(thread, comm, 0);
124 thread__put(thread);
125 }
126
127 machine->current_tid = NULL;
128 err = 0;
129
130 out:
131 if (err) {
132 zfree(&machine->root_dir);
133 zfree(&machine->mmap_name);
134 }
135 	return err;
136 }
137
138 struct machine *machine__new_host(void)
139 {
140 struct machine *machine = malloc(sizeof(*machine));
141
142 if (machine != NULL) {
143 machine__init(machine, "", HOST_KERNEL_ID);
144
145 if (machine__create_kernel_maps(machine) < 0)
146 goto out_delete;
147 }
148
149 return machine;
150 out_delete:
151 free(machine);
152 return NULL;
153 }
154
155 struct machine *machine__new_kallsyms(void)
156 {
157 struct machine *machine = machine__new_host();
158 /*
159 * FIXME:
160 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
161 * ask for not using the kcore parsing code, once this one is fixed
162 * to create a map per module.
163 */
164 if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
165 machine__delete(machine);
166 machine = NULL;
167 }
168
169 return machine;
170 }
171
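/*
 * Drop the dsos list's reference on every DSO and empty the list/rbtree;
 * DSOs still referenced elsewhere stay alive until their last user puts them.
 */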
172 static void dsos__purge(struct dsos *dsos)
173 {
174 struct dso *pos, *n;
175
176 down_write(&dsos->lock);
177
178 list_for_each_entry_safe(pos, n, &dsos->head, node) {
179 RB_CLEAR_NODE(&pos->rb_node);
180 pos->root = NULL;
181 list_del_init(&pos->node);
182 dso__put(pos);
183 }
184
185 up_write(&dsos->lock);
186 }
187
188 static void dsos__exit(struct dsos *dsos)
189 {
190 dsos__purge(dsos);
191 exit_rwsem(&dsos->lock);
192 }
193
194 void machine__delete_threads(struct machine *machine)
195 {
196 struct rb_node *nd;
197 int i;
198
199 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
200 struct threads *threads = &machine->threads[i];
201 down_write(&threads->lock);
202 nd = rb_first_cached(&threads->entries);
203 while (nd) {
204 struct thread *t = rb_entry(nd, struct thread, rb_node);
205
206 nd = rb_next(nd);
207 __machine__remove_thread(machine, t, false);
208 }
209 up_write(&threads->lock);
210 }
211 }
212
213 void machine__exit(struct machine *machine)
214 {
215 int i;
216
217 if (machine == NULL)
218 return;
219
220 machine__destroy_kernel_maps(machine);
221 maps__exit(&machine->kmaps);
222 dsos__exit(&machine->dsos);
223 machine__exit_vdso(machine);
224 zfree(&machine->root_dir);
225 zfree(&machine->mmap_name);
226 zfree(&machine->current_tid);
227
228 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
229 struct threads *threads = &machine->threads[i];
230 struct thread *thread, *n;
231 /*
232 * Forget about the dead, at this point whatever threads were
233 * left in the dead lists better have a reference count taken
234  * by whoever is using them, and then, when they drop those references
235  * and it finally hits zero, thread__put() will check and see that
236  * it's not in the dead threads list and will not try to remove it
237 * from there, just calling thread__delete() straight away.
238 */
239 list_for_each_entry_safe(thread, n, &threads->dead, node)
240 list_del_init(&thread->node);
241
242 exit_rwsem(&threads->lock);
243 }
244 }
245
246 void machine__delete(struct machine *machine)
247 {
248 if (machine) {
249 machine__exit(machine);
250 free(machine);
251 }
252 }
253
254 void machines__init(struct machines *machines)
255 {
256 machine__init(&machines->host, "", HOST_KERNEL_ID);
257 machines->guests = RB_ROOT_CACHED;
258 }
259
260 void machines__exit(struct machines *machines)
261 {
262 machine__exit(&machines->host);
263 /* XXX exit guest */
264 }
265
266 struct machine *machines__add(struct machines *machines, pid_t pid,
267 const char *root_dir)
268 {
269 struct rb_node **p = &machines->guests.rb_root.rb_node;
270 struct rb_node *parent = NULL;
271 struct machine *pos, *machine = malloc(sizeof(*machine));
272 bool leftmost = true;
273
274 if (machine == NULL)
275 return NULL;
276
277 if (machine__init(machine, root_dir, pid) != 0) {
278 free(machine);
279 return NULL;
280 }
281
282 while (*p != NULL) {
283 parent = *p;
284 pos = rb_entry(parent, struct machine, rb_node);
285 if (pid < pos->pid)
286 p = &(*p)->rb_left;
287 else {
288 p = &(*p)->rb_right;
289 leftmost = false;
290 }
291 }
292
293 rb_link_node(&machine->rb_node, parent, p);
294 rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);
295
296 return machine;
297 }
298
299 void machines__set_comm_exec(struct machines *machines, bool comm_exec)
300 {
301 struct rb_node *nd;
302
303 machines->host.comm_exec = comm_exec;
304
305 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
306 struct machine *machine = rb_entry(nd, struct machine, rb_node);
307
308 machine->comm_exec = comm_exec;
309 }
310 }
311
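/*
 * Look up the machine for 'pid': HOST_KERNEL_ID selects the host, otherwise
 * search the guests rbtree. If there is no exact match, fall back to the
 * default guest machine (pid 0) when one exists.
 */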
312 struct machine *machines__find(struct machines *machines, pid_t pid)
313 {
314 struct rb_node **p = &machines->guests.rb_root.rb_node;
315 struct rb_node *parent = NULL;
316 struct machine *machine;
317 struct machine *default_machine = NULL;
318
319 if (pid == HOST_KERNEL_ID)
320 return &machines->host;
321
322 while (*p != NULL) {
323 parent = *p;
324 machine = rb_entry(parent, struct machine, rb_node);
325 if (pid < machine->pid)
326 p = &(*p)->rb_left;
327 else if (pid > machine->pid)
328 p = &(*p)->rb_right;
329 else
330 return machine;
331 if (!machine->pid)
332 default_machine = machine;
333 }
334
335 return default_machine;
336 }
337
338 struct machine *machines__findnew(struct machines *machines, pid_t pid)
339 {
340 char path[PATH_MAX];
341 const char *root_dir = "";
342 struct machine *machine = machines__find(machines, pid);
343
344 if (machine && (machine->pid == pid))
345 goto out;
346
347 if ((pid != HOST_KERNEL_ID) &&
348 (pid != DEFAULT_GUEST_KERNEL_ID) &&
349 (symbol_conf.guestmount)) {
350 sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
351 if (access(path, R_OK)) {
352 static struct strlist *seen;
353
354 if (!seen)
355 seen = strlist__new(NULL, NULL);
356
357 if (!strlist__has_entry(seen, path)) {
358 pr_err("Can't access file %s\n", path);
359 strlist__add(seen, path);
360 }
361 machine = NULL;
362 goto out;
363 }
364 root_dir = path;
365 }
366
367 machine = machines__add(machines, pid, root_dir);
368 out:
369 return machine;
370 }
371
372 void machines__process_guests(struct machines *machines,
373 machine__process_t process, void *data)
374 {
375 struct rb_node *nd;
376
377 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
378 struct machine *pos = rb_entry(nd, struct machine, rb_node);
379 process(pos, data);
380 }
381 }
382
383 void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
384 {
385 struct rb_node *node;
386 struct machine *machine;
387
388 machines->host.id_hdr_size = id_hdr_size;
389
390 for (node = rb_first_cached(&machines->guests); node;
391 node = rb_next(node)) {
392 machine = rb_entry(node, struct machine, rb_node);
393 machine->id_hdr_size = id_hdr_size;
394 }
395
396 return;
397 }
398
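/*
 * Called when a thread's pid becomes known after the fact: record it and, if
 * the thread is not its own group leader, share the leader's maps so every
 * thread of the process resolves addresses in the same address space.
 */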
399 static void machine__update_thread_pid(struct machine *machine,
400 struct thread *th, pid_t pid)
401 {
402 struct thread *leader;
403
404 if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
405 return;
406
407 th->pid_ = pid;
408
409 if (th->pid_ == th->tid)
410 return;
411
412 leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
413 if (!leader)
414 goto out_err;
415
416 if (!leader->maps)
417 leader->maps = maps__new(machine);
418
419 if (!leader->maps)
420 goto out_err;
421
422 if (th->maps == leader->maps)
423 return;
424
425 if (th->maps) {
426 /*
427 * Maps are created from MMAP events which provide the pid and
428 * tid. Consequently there never should be any maps on a thread
429 * with an unknown pid. Just print an error if there are.
430 */
431 if (!maps__empty(th->maps))
432 pr_err("Discarding thread maps for %d:%d\n",
433 th->pid_, th->tid);
434 maps__put(th->maps);
435 }
436
437 th->maps = maps__get(leader->maps);
438 out_put:
439 thread__put(leader);
440 return;
441 out_err:
442 pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
443 goto out_put;
444 }
445
446 /*
447 * Front-end cache - TID lookups come in blocks,
448  * so most of the time we don't have to look up
449 * the full rbtree:
450 */
451 static struct thread*
452 __threads__get_last_match(struct threads *threads, struct machine *machine,
453 int pid, int tid)
454 {
455 struct thread *th;
456
457 th = threads->last_match;
458 if (th != NULL) {
459 if (th->tid == tid) {
460 machine__update_thread_pid(machine, th, pid);
461 return thread__get(th);
462 }
463
464 threads->last_match = NULL;
465 }
466
467 return NULL;
468 }
469
470 static struct thread*
471 threads__get_last_match(struct threads *threads, struct machine *machine,
472 int pid, int tid)
473 {
474 struct thread *th = NULL;
475
476 if (perf_singlethreaded)
477 th = __threads__get_last_match(threads, machine, pid, tid);
478
479 return th;
480 }
481
482 static void
483 __threads__set_last_match(struct threads *threads, struct thread *th)
484 {
485 threads->last_match = th;
486 }
487
488 static void
489 threads__set_last_match(struct threads *threads, struct thread *th)
490 {
491 if (perf_singlethreaded)
492 __threads__set_last_match(threads, th);
493 }
494
495 /*
496 * Caller must eventually drop thread->refcnt returned with a successful
497 * lookup/new thread inserted.
498 */
499 static struct thread *____machine__findnew_thread(struct machine *machine,
500 struct threads *threads,
501 pid_t pid, pid_t tid,
502 bool create)
503 {
504 struct rb_node **p = &threads->entries.rb_root.rb_node;
505 struct rb_node *parent = NULL;
506 struct thread *th;
507 bool leftmost = true;
508
509 th = threads__get_last_match(threads, machine, pid, tid);
510 if (th)
511 return th;
512
513 while (*p != NULL) {
514 parent = *p;
515 th = rb_entry(parent, struct thread, rb_node);
516
517 if (th->tid == tid) {
518 threads__set_last_match(threads, th);
519 machine__update_thread_pid(machine, th, pid);
520 return thread__get(th);
521 }
522
523 if (tid < th->tid)
524 p = &(*p)->rb_left;
525 else {
526 p = &(*p)->rb_right;
527 leftmost = false;
528 }
529 }
530
531 if (!create)
532 return NULL;
533
534 th = thread__new(pid, tid);
535 if (th != NULL) {
536 rb_link_node(&th->rb_node, parent, p);
537 rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);
538
539 /*
540 * We have to initialize maps separately after rb tree is updated.
541 *
542 * The reason is that we call machine__findnew_thread
543 * within thread__init_maps to find the thread
544 		 * leader and that would screw up the rb tree.
545 */
546 if (thread__init_maps(th, machine)) {
547 rb_erase_cached(&th->rb_node, &threads->entries);
548 RB_CLEAR_NODE(&th->rb_node);
549 thread__put(th);
550 return NULL;
551 }
552 /*
553 * It is now in the rbtree, get a ref
554 */
555 thread__get(th);
556 threads__set_last_match(threads, th);
557 ++threads->nr;
558 }
559
560 return th;
561 }
562
563 struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
564 {
565 return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
566 }
567
568 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
569 pid_t tid)
570 {
571 struct threads *threads = machine__threads(machine, tid);
572 struct thread *th;
573
574 down_write(&threads->lock);
575 th = __machine__findnew_thread(machine, pid, tid);
576 up_write(&threads->lock);
577 return th;
578 }
579
580 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
581 pid_t tid)
582 {
583 struct threads *threads = machine__threads(machine, tid);
584 struct thread *th;
585
586 down_read(&threads->lock);
587 th = ____machine__findnew_thread(machine, threads, pid, tid, false);
588 up_read(&threads->lock);
589 return th;
590 }
591
592 struct comm *machine__thread_exec_comm(struct machine *machine,
593 struct thread *thread)
594 {
595 if (machine->comm_exec)
596 return thread__exec_comm(thread);
597 else
598 return thread__comm(thread);
599 }
600
601 int machine__process_comm_event(struct machine *machine, union perf_event *event,
602 struct perf_sample *sample)
603 {
604 struct thread *thread = machine__findnew_thread(machine,
605 event->comm.pid,
606 event->comm.tid);
607 bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
608 int err = 0;
609
610 if (exec)
611 machine->comm_exec = true;
612
613 if (dump_trace)
614 perf_event__fprintf_comm(event, stdout);
615
616 if (thread == NULL ||
617 __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
618 dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
619 err = -1;
620 }
621
622 thread__put(thread);
623
624 return err;
625 }
626
627 int machine__process_namespaces_event(struct machine *machine __maybe_unused,
628 union perf_event *event,
629 struct perf_sample *sample __maybe_unused)
630 {
631 struct thread *thread = machine__findnew_thread(machine,
632 event->namespaces.pid,
633 event->namespaces.tid);
634 int err = 0;
635
636 WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
637 "\nWARNING: kernel seems to support more namespaces than perf"
638 " tool.\nTry updating the perf tool..\n\n");
639
640 WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
641 "\nWARNING: perf tool seems to support more namespaces than"
642 " the kernel.\nTry updating the kernel..\n\n");
643
644 if (dump_trace)
645 perf_event__fprintf_namespaces(event, stdout);
646
647 if (thread == NULL ||
648 thread__set_namespaces(thread, sample->time, &event->namespaces)) {
649 dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
650 err = -1;
651 }
652
653 thread__put(thread);
654
655 return err;
656 }
657
658 int machine__process_cgroup_event(struct machine *machine,
659 union perf_event *event,
660 struct perf_sample *sample __maybe_unused)
661 {
662 struct cgroup *cgrp;
663
664 if (dump_trace)
665 perf_event__fprintf_cgroup(event, stdout);
666
667 cgrp = cgroup__findnew(machine->env, event->cgroup.id, event->cgroup.path);
668 if (cgrp == NULL)
669 return -ENOMEM;
670
671 return 0;
672 }
673
674 int machine__process_lost_event(struct machine *machine __maybe_unused,
675 union perf_event *event, struct perf_sample *sample __maybe_unused)
676 {
677 dump_printf(": id:%" PRI_lu64 ": lost:%" PRI_lu64 "\n",
678 event->lost.id, event->lost.lost);
679 return 0;
680 }
681
682 int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
683 union perf_event *event, struct perf_sample *sample)
684 {
685 dump_printf(": id:%" PRIu64 ": lost samples :%" PRI_lu64 "\n",
686 sample->id, event->lost_samples.lost);
687 return 0;
688 }
689
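/*
 * Find or create the DSO for a kernel module, under the dsos lock. The
 * returned DSO carries an extra reference that the caller must drop with
 * dso__put().
 */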
690 static struct dso *machine__findnew_module_dso(struct machine *machine,
691 struct kmod_path *m,
692 const char *filename)
693 {
694 struct dso *dso;
695
696 down_write(&machine->dsos.lock);
697
698 dso = __dsos__find(&machine->dsos, m->name, true);
699 if (!dso) {
700 dso = __dsos__addnew(&machine->dsos, m->name);
701 if (dso == NULL)
702 goto out_unlock;
703
704 dso__set_module_info(dso, m, machine);
705 dso__set_long_name(dso, strdup(filename), true);
706 dso->kernel = DSO_TYPE_KERNEL;
707 }
708
709 dso__get(dso);
710 out_unlock:
711 up_write(&machine->dsos.lock);
712 return dso;
713 }
714
715 int machine__process_aux_event(struct machine *machine __maybe_unused,
716 union perf_event *event)
717 {
718 if (dump_trace)
719 perf_event__fprintf_aux(event, stdout);
720 return 0;
721 }
722
723 int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
724 union perf_event *event)
725 {
726 if (dump_trace)
727 perf_event__fprintf_itrace_start(event, stdout);
728 return 0;
729 }
730
731 int machine__process_switch_event(struct machine *machine __maybe_unused,
732 union perf_event *event)
733 {
734 if (dump_trace)
735 perf_event__fprintf_switch(event, stdout);
736 return 0;
737 }
738
739 static int is_bpf_image(const char *name)
740 {
741 	return strncmp(name, "bpf_trampoline_", sizeof("bpf_trampoline_") - 1) == 0 ||
742 	       strncmp(name, "bpf_dispatcher_", sizeof("bpf_dispatcher_") - 1) == 0;
743 }
744
745 static int machine__process_ksymbol_register(struct machine *machine,
746 union perf_event *event,
747 struct perf_sample *sample __maybe_unused)
748 {
749 struct symbol *sym;
750 struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);
751
752 if (!map) {
753 struct dso *dso = dso__new(event->ksymbol.name);
754
755 if (dso) {
756 dso->kernel = DSO_TYPE_KERNEL;
757 map = map__new2(0, dso);
758 }
759
760 if (!dso || !map) {
761 dso__put(dso);
762 return -ENOMEM;
763 }
764
765 map->start = event->ksymbol.addr;
766 map->end = map->start + event->ksymbol.len;
767 maps__insert(&machine->kmaps, map);
768 dso__set_loaded(dso);
769
770 if (is_bpf_image(event->ksymbol.name)) {
771 dso->binary_type = DSO_BINARY_TYPE__BPF_IMAGE;
772 dso__set_long_name(dso, "", false);
773 }
774 }
775
776 sym = symbol__new(map->map_ip(map, map->start),
777 event->ksymbol.len,
778 0, 0, event->ksymbol.name);
779 if (!sym)
780 return -ENOMEM;
781 dso__insert_symbol(map->dso, sym);
782 return 0;
783 }
784
785 static int machine__process_ksymbol_unregister(struct machine *machine,
786 union perf_event *event,
787 struct perf_sample *sample __maybe_unused)
788 {
789 struct map *map;
790
791 map = maps__find(&machine->kmaps, event->ksymbol.addr);
792 if (map)
793 maps__remove(&machine->kmaps, map);
794
795 return 0;
796 }
797
798 int machine__process_ksymbol(struct machine *machine __maybe_unused,
799 union perf_event *event,
800 struct perf_sample *sample)
801 {
802 if (dump_trace)
803 perf_event__fprintf_ksymbol(event, stdout);
804
805 if (event->ksymbol.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
806 return machine__process_ksymbol_unregister(machine, event,
807 sample);
808 return machine__process_ksymbol_register(machine, event, sample);
809 }
810
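/*
 * Create a map at 'start' for the kernel module 'filename', backed by its
 * DSO, and insert it into the machine's kernel maps, which then own the map
 * reference; the returned pointer is borrowed.
 */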
811 static struct map *machine__addnew_module_map(struct machine *machine, u64 start,
812 const char *filename)
813 {
814 struct map *map = NULL;
815 struct kmod_path m;
816 struct dso *dso;
817
818 if (kmod_path__parse_name(&m, filename))
819 return NULL;
820
821 dso = machine__findnew_module_dso(machine, &m, filename);
822 if (dso == NULL)
823 goto out;
824
825 map = map__new2(start, dso);
826 if (map == NULL)
827 goto out;
828
829 maps__insert(&machine->kmaps, map);
830
831 	/* Put the map here because maps__insert already got it */
832 map__put(map);
833 out:
834 /* put the dso here, corresponding to machine__findnew_module_dso */
835 dso__put(dso);
836 zfree(&m.name);
837 return map;
838 }
839
840 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
841 {
842 struct rb_node *nd;
843 size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
844
845 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
846 struct machine *pos = rb_entry(nd, struct machine, rb_node);
847 ret += __dsos__fprintf(&pos->dsos.head, fp);
848 }
849
850 return ret;
851 }
852
853 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
854 bool (skip)(struct dso *dso, int parm), int parm)
855 {
856 return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
857 }
858
859 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
860 bool (skip)(struct dso *dso, int parm), int parm)
861 {
862 struct rb_node *nd;
863 size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);
864
865 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
866 struct machine *pos = rb_entry(nd, struct machine, rb_node);
867 ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
868 }
869 return ret;
870 }
871
872 size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
873 {
874 int i;
875 size_t printed = 0;
876 struct dso *kdso = machine__kernel_dso(machine);
877
878 if (kdso->has_build_id) {
879 char filename[PATH_MAX];
880 if (dso__build_id_filename(kdso, filename, sizeof(filename),
881 false))
882 printed += fprintf(fp, "[0] %s\n", filename);
883 }
884
885 for (i = 0; i < vmlinux_path__nr_entries; ++i)
886 printed += fprintf(fp, "[%d] %s\n",
887 i + kdso->has_build_id, vmlinux_path[i]);
888
889 return printed;
890 }
891
892 size_t machine__fprintf(struct machine *machine, FILE *fp)
893 {
894 struct rb_node *nd;
895 size_t ret;
896 int i;
897
898 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
899 struct threads *threads = &machine->threads[i];
900
901 down_read(&threads->lock);
902
903 ret = fprintf(fp, "Threads: %u\n", threads->nr);
904
905 for (nd = rb_first_cached(&threads->entries); nd;
906 nd = rb_next(nd)) {
907 struct thread *pos = rb_entry(nd, struct thread, rb_node);
908
909 ret += thread__fprintf(pos, fp);
910 }
911
912 up_read(&threads->lock);
913 }
914 return ret;
915 }
916
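/*
 * Pick the DSO representing this machine's kernel: a user-supplied vmlinux
 * name when available, otherwise the synthetic kallsyms mmap name, for host
 * or guest, and read the running kernel's build-id if not yet known.
 */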
917 static struct dso *machine__get_kernel(struct machine *machine)
918 {
919 const char *vmlinux_name = machine->mmap_name;
920 struct dso *kernel;
921
922 if (machine__is_host(machine)) {
923 if (symbol_conf.vmlinux_name)
924 vmlinux_name = symbol_conf.vmlinux_name;
925
926 kernel = machine__findnew_kernel(machine, vmlinux_name,
927 "[kernel]", DSO_TYPE_KERNEL);
928 } else {
929 if (symbol_conf.default_guest_vmlinux_name)
930 vmlinux_name = symbol_conf.default_guest_vmlinux_name;
931
932 kernel = machine__findnew_kernel(machine, vmlinux_name,
933 "[guest.kernel]",
934 DSO_TYPE_GUEST_KERNEL);
935 }
936
937 if (kernel != NULL && (!kernel->has_build_id))
938 dso__read_running_kernel_build_id(kernel, machine);
939
940 return kernel;
941 }
942
943 struct process_args {
944 u64 start;
945 };
946
947 void machine__get_kallsyms_filename(struct machine *machine, char *buf,
948 size_t bufsz)
949 {
950 if (machine__is_default_guest(machine))
951 scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
952 else
953 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
954 }
955
956 const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
957
958 /* Figure out the start address of kernel map from /proc/kallsyms.
959 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
960 * symbol_name if it's not that important.
961 */
962 static int machine__get_running_kernel_start(struct machine *machine,
963 const char **symbol_name,
964 u64 *start, u64 *end)
965 {
966 char filename[PATH_MAX];
967 int i, err = -1;
968 const char *name;
969 u64 addr = 0;
970
971 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
972
973 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
974 return 0;
975
976 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
977 err = kallsyms__get_function_start(filename, name, &addr);
978 if (!err)
979 break;
980 }
981
982 if (err)
983 return -1;
984
985 if (symbol_name)
986 *symbol_name = name;
987
988 *start = addr;
989
990 err = kallsyms__get_function_start(filename, "_etext", &addr);
991 if (!err)
992 *end = addr;
993
994 return 0;
995 }
996
997 int machine__create_extra_kernel_map(struct machine *machine,
998 struct dso *kernel,
999 struct extra_kernel_map *xm)
1000 {
1001 struct kmap *kmap;
1002 struct map *map;
1003
1004 map = map__new2(xm->start, kernel);
1005 if (!map)
1006 return -1;
1007
1008 map->end = xm->end;
1009 map->pgoff = xm->pgoff;
1010
1011 kmap = map__kmap(map);
1012
1013 strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
1014
1015 maps__insert(&machine->kmaps, map);
1016
1017 pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
1018 kmap->name, map->start, map->end);
1019
1020 map__put(map);
1021
1022 return 0;
1023 }
1024
1025 static u64 find_entry_trampoline(struct dso *dso)
1026 {
1027 /* Duplicates are removed so lookup all aliases */
1028 const char *syms[] = {
1029 "_entry_trampoline",
1030 "__entry_trampoline_start",
1031 "entry_SYSCALL_64_trampoline",
1032 };
1033 struct symbol *sym = dso__first_symbol(dso);
1034 unsigned int i;
1035
1036 for (; sym; sym = dso__next_symbol(sym)) {
1037 if (sym->binding != STB_GLOBAL)
1038 continue;
1039 for (i = 0; i < ARRAY_SIZE(syms); i++) {
1040 if (!strcmp(sym->name, syms[i]))
1041 return sym->start;
1042 }
1043 }
1044
1045 return 0;
1046 }
1047
1048 /*
1049 * These values can be used for kernels that do not have symbols for the entry
1050 * trampolines in kallsyms.
1051 */
1052 #define X86_64_CPU_ENTRY_AREA_PER_CPU 0xfffffe0000000000ULL
1053 #define X86_64_CPU_ENTRY_AREA_SIZE 0x2c000
1054 #define X86_64_ENTRY_TRAMPOLINE 0x6000
1055
1056 /* Map x86_64 PTI entry trampolines */
1057 int machine__map_x86_64_entry_trampolines(struct machine *machine,
1058 struct dso *kernel)
1059 {
1060 struct maps *kmaps = &machine->kmaps;
1061 int nr_cpus_avail, cpu;
1062 bool found = false;
1063 struct map *map;
1064 u64 pgoff;
1065
1066 /*
1067 * In the vmlinux case, pgoff is a virtual address which must now be
1068 * mapped to a vmlinux offset.
1069 */
1070 maps__for_each_entry(kmaps, map) {
1071 struct kmap *kmap = __map__kmap(map);
1072 struct map *dest_map;
1073
1074 if (!kmap || !is_entry_trampoline(kmap->name))
1075 continue;
1076
1077 dest_map = maps__find(kmaps, map->pgoff);
1078 if (dest_map != map)
1079 map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
1080 found = true;
1081 }
1082 if (found || machine->trampolines_mapped)
1083 return 0;
1084
1085 pgoff = find_entry_trampoline(kernel);
1086 if (!pgoff)
1087 return 0;
1088
1089 nr_cpus_avail = machine__nr_cpus_avail(machine);
1090
1091 /* Add a 1 page map for each CPU's entry trampoline */
1092 for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
1093 u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
1094 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
1095 X86_64_ENTRY_TRAMPOLINE;
1096 struct extra_kernel_map xm = {
1097 .start = va,
1098 .end = va + page_size,
1099 .pgoff = pgoff,
1100 };
1101
1102 strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);
1103
1104 if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
1105 return -1;
1106 }
1107
1108 machine->trampolines_mapped = nr_cpus_avail;
1109
1110 return 0;
1111 }
1112
1113 int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
1114 struct dso *kernel __maybe_unused)
1115 {
1116 return 0;
1117 }
1118
1119 static int
1120 __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
1121 {
1122 	/* In case the kernel map is being renewed, destroy the previous one */
1123 machine__destroy_kernel_maps(machine);
1124
1125 machine->vmlinux_map = map__new2(0, kernel);
1126 if (machine->vmlinux_map == NULL)
1127 return -1;
1128
1129 machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
1130 maps__insert(&machine->kmaps, machine->vmlinux_map);
1131 return 0;
1132 }
1133
1134 void machine__destroy_kernel_maps(struct machine *machine)
1135 {
1136 struct kmap *kmap;
1137 struct map *map = machine__kernel_map(machine);
1138
1139 if (map == NULL)
1140 return;
1141
1142 kmap = map__kmap(map);
1143 maps__remove(&machine->kmaps, map);
1144 if (kmap && kmap->ref_reloc_sym) {
1145 zfree((char **)&kmap->ref_reloc_sym->name);
1146 zfree(&kmap->ref_reloc_sym);
1147 }
1148
1149 map__zput(machine->vmlinux_map);
1150 }
1151
1152 int machines__create_guest_kernel_maps(struct machines *machines)
1153 {
1154 int ret = 0;
1155 struct dirent **namelist = NULL;
1156 int i, items = 0;
1157 char path[PATH_MAX];
1158 pid_t pid;
1159 char *endp;
1160
1161 if (symbol_conf.default_guest_vmlinux_name ||
1162 symbol_conf.default_guest_modules ||
1163 symbol_conf.default_guest_kallsyms) {
1164 machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
1165 }
1166
1167 if (symbol_conf.guestmount) {
1168 items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
1169 if (items <= 0)
1170 return -ENOENT;
1171 for (i = 0; i < items; i++) {
1172 if (!isdigit(namelist[i]->d_name[0])) {
1173 /* Filter out . and .. */
1174 continue;
1175 }
1176 pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
1177 if ((*endp != '\0') ||
1178 (endp == namelist[i]->d_name) ||
1179 (errno == ERANGE)) {
1180 pr_debug("invalid directory (%s). Skipping.\n",
1181 namelist[i]->d_name);
1182 continue;
1183 }
1184 sprintf(path, "%s/%s/proc/kallsyms",
1185 symbol_conf.guestmount,
1186 namelist[i]->d_name);
1187 ret = access(path, R_OK);
1188 if (ret) {
1189 pr_debug("Can't access file %s\n", path);
1190 goto failure;
1191 }
1192 machines__create_kernel_maps(machines, pid);
1193 }
1194 failure:
1195 free(namelist);
1196 }
1197
1198 return ret;
1199 }
1200
1201 void machines__destroy_kernel_maps(struct machines *machines)
1202 {
1203 struct rb_node *next = rb_first_cached(&machines->guests);
1204
1205 machine__destroy_kernel_maps(&machines->host);
1206
1207 while (next) {
1208 struct machine *pos = rb_entry(next, struct machine, rb_node);
1209
1210 next = rb_next(&pos->rb_node);
1211 rb_erase_cached(&pos->rb_node, &machines->guests);
1212 machine__delete(pos);
1213 }
1214 }
1215
1216 int machines__create_kernel_maps(struct machines *machines, pid_t pid)
1217 {
1218 struct machine *machine = machines__findnew(machines, pid);
1219
1220 if (machine == NULL)
1221 return -1;
1222
1223 return machine__create_kernel_maps(machine);
1224 }
1225
1226 int machine__load_kallsyms(struct machine *machine, const char *filename)
1227 {
1228 struct map *map = machine__kernel_map(machine);
1229 int ret = __dso__load_kallsyms(map->dso, filename, map, true);
1230
1231 if (ret > 0) {
1232 dso__set_loaded(map->dso);
1233 /*
1234 		 * Since /proc/kallsyms will have multiple sections for the
1235 * kernel, with modules between them, fixup the end of all
1236 * sections.
1237 */
1238 maps__fixup_end(&machine->kmaps);
1239 }
1240
1241 return ret;
1242 }
1243
1244 int machine__load_vmlinux_path(struct machine *machine)
1245 {
1246 struct map *map = machine__kernel_map(machine);
1247 int ret = dso__load_vmlinux_path(map->dso, map);
1248
1249 if (ret > 0)
1250 dso__set_loaded(map->dso);
1251
1252 return ret;
1253 }
1254
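/*
 * Parse "<root_dir>/proc/version" and return a strdup()ed copy of the kernel
 * release that follows the "Linux version " prefix (up to the first space),
 * or NULL on failure.
 */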
1255 static char *get_kernel_version(const char *root_dir)
1256 {
1257 char version[PATH_MAX];
1258 FILE *file;
1259 char *name, *tmp;
1260 const char *prefix = "Linux version ";
1261
1262 sprintf(version, "%s/proc/version", root_dir);
1263 file = fopen(version, "r");
1264 if (!file)
1265 return NULL;
1266
1267 tmp = fgets(version, sizeof(version), file);
1268 fclose(file);
1269 if (!tmp)
1270 return NULL;
1271
1272 name = strstr(version, prefix);
1273 if (!name)
1274 return NULL;
1275 name += strlen(prefix);
1276 tmp = strchr(name, ' ');
1277 if (tmp)
1278 *tmp = '\0';
1279
1280 return strdup(name);
1281 }
1282
1283 static bool is_kmod_dso(struct dso *dso)
1284 {
1285 return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
1286 dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
1287 }
1288
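/*
 * Given the path of a module file found under /lib/modules, look up its map
 * by module name and update the DSO's long name and build-id, adjusting the
 * symtab type and compression for compressed modules.
 */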
1289 static int maps__set_module_path(struct maps *maps, const char *path, struct kmod_path *m)
1290 {
1291 char *long_name;
1292 struct map *map = maps__find_by_name(maps, m->name);
1293
1294 if (map == NULL)
1295 return 0;
1296
1297 long_name = strdup(path);
1298 if (long_name == NULL)
1299 return -ENOMEM;
1300
1301 dso__set_long_name(map->dso, long_name, true);
1302 dso__kernel_module_get_build_id(map->dso, "");
1303
1304 /*
1305 	 * The full name could reveal kmod compression, so
1306 	 * update the symtab_type if needed.
1307 */
1308 if (m->comp && is_kmod_dso(map->dso)) {
1309 map->dso->symtab_type++;
1310 map->dso->comp = m->comp;
1311 }
1312
1313 return 0;
1314 }
1315
1316 static int maps__set_modules_path_dir(struct maps *maps, const char *dir_name, int depth)
1317 {
1318 struct dirent *dent;
1319 DIR *dir = opendir(dir_name);
1320 int ret = 0;
1321
1322 if (!dir) {
1323 pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
1324 return -1;
1325 }
1326
1327 while ((dent = readdir(dir)) != NULL) {
1328 char path[PATH_MAX];
1329 struct stat st;
1330
1331 		/* sshfs might return bad dent->d_type, so we have to stat */
1332 snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
1333 if (stat(path, &st))
1334 continue;
1335
1336 if (S_ISDIR(st.st_mode)) {
1337 if (!strcmp(dent->d_name, ".") ||
1338 !strcmp(dent->d_name, ".."))
1339 continue;
1340
1341 /* Do not follow top-level source and build symlinks */
1342 if (depth == 0) {
1343 if (!strcmp(dent->d_name, "source") ||
1344 !strcmp(dent->d_name, "build"))
1345 continue;
1346 }
1347
1348 ret = maps__set_modules_path_dir(maps, path, depth + 1);
1349 if (ret < 0)
1350 goto out;
1351 } else {
1352 struct kmod_path m;
1353
1354 ret = kmod_path__parse_name(&m, dent->d_name);
1355 if (ret)
1356 goto out;
1357
1358 if (m.kmod)
1359 ret = maps__set_module_path(maps, path, &m);
1360
1361 zfree(&m.name);
1362
1363 if (ret)
1364 goto out;
1365 }
1366 }
1367
1368 out:
1369 closedir(dir);
1370 return ret;
1371 }
1372
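/*
 * Walk <root_dir>/lib/modules/<kernel version> so that each module map gets
 * the full on-disk path of its .ko file.
 */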
1373 static int machine__set_modules_path(struct machine *machine)
1374 {
1375 char *version;
1376 char modules_path[PATH_MAX];
1377
1378 version = get_kernel_version(machine->root_dir);
1379 if (!version)
1380 return -1;
1381
1382 snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
1383 machine->root_dir, version);
1384 free(version);
1385
1386 return maps__set_modules_path_dir(&machine->kmaps, modules_path, 0);
1387 }
1388 int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
1389 u64 *size __maybe_unused,
1390 const char *name __maybe_unused)
1391 {
1392 return 0;
1393 }
1394
1395 static int machine__create_module(void *arg, const char *name, u64 start,
1396 u64 size)
1397 {
1398 struct machine *machine = arg;
1399 struct map *map;
1400
1401 if (arch__fix_module_text_start(&start, &size, name) < 0)
1402 return -1;
1403
1404 map = machine__addnew_module_map(machine, start, name);
1405 if (map == NULL)
1406 return -1;
1407 map->end = start + size;
1408
1409 dso__kernel_module_get_build_id(map->dso, machine->root_dir);
1410
1411 return 0;
1412 }
1413
1414 static int machine__create_modules(struct machine *machine)
1415 {
1416 const char *modules;
1417 char path[PATH_MAX];
1418
1419 if (machine__is_default_guest(machine)) {
1420 modules = symbol_conf.default_guest_modules;
1421 } else {
1422 snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
1423 modules = path;
1424 }
1425
1426 if (symbol__restricted_filename(modules, "/proc/modules"))
1427 return -1;
1428
1429 if (modules__parse(modules, machine, machine__create_module))
1430 return -1;
1431
1432 if (!machine__set_modules_path(machine))
1433 return 0;
1434
1435 pr_debug("Problems setting modules path maps, continuing anyway...\n");
1436
1437 return 0;
1438 }
1439
1440 static void machine__set_kernel_mmap(struct machine *machine,
1441 u64 start, u64 end)
1442 {
1443 machine->vmlinux_map->start = start;
1444 machine->vmlinux_map->end = end;
1445 /*
1446 	 * Be a bit paranoid here: some perf.data files come with
1447 	 * a zero-sized synthesized MMAP event for the kernel.
1448 */
1449 if (start == 0 && end == 0)
1450 machine->vmlinux_map->end = ~0ULL;
1451 }
1452
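/*
 * Changing the kernel map's start address changes its position in the maps
 * rbtree, so take the map out, update its range and re-insert it.
 */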
1453 static void machine__update_kernel_mmap(struct machine *machine,
1454 u64 start, u64 end)
1455 {
1456 struct map *map = machine__kernel_map(machine);
1457
1458 map__get(map);
1459 maps__remove(&machine->kmaps, map);
1460
1461 machine__set_kernel_mmap(machine, start, end);
1462
1463 maps__insert(&machine->kmaps, map);
1464 map__put(map);
1465 }
1466
1467 int machine__create_kernel_maps(struct machine *machine)
1468 {
1469 struct dso *kernel = machine__get_kernel(machine);
1470 const char *name = NULL;
1471 struct map *map;
1472 u64 start = 0, end = ~0ULL;
1473 int ret;
1474
1475 if (kernel == NULL)
1476 return -1;
1477
1478 ret = __machine__create_kernel_maps(machine, kernel);
1479 if (ret < 0)
1480 goto out_put;
1481
1482 if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
1483 if (machine__is_host(machine))
1484 pr_debug("Problems creating module maps, "
1485 "continuing anyway...\n");
1486 else
1487 pr_debug("Problems creating module maps for guest %d, "
1488 "continuing anyway...\n", machine->pid);
1489 }
1490
1491 if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
1492 if (name &&
1493 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
1494 machine__destroy_kernel_maps(machine);
1495 ret = -1;
1496 goto out_put;
1497 }
1498
1499 /*
1500 		 * We have a real start address now, so re-order the kmaps;
1501 		 * assume the kernel map is the last one in the kmaps.
1502 */
1503 machine__update_kernel_mmap(machine, start, end);
1504 }
1505
1506 if (machine__create_extra_kernel_maps(machine, kernel))
1507 pr_debug("Problems creating extra kernel maps, continuing anyway...\n");
1508
1509 if (end == ~0ULL) {
1510 /* update end address of the kernel map using adjacent module address */
1511 map = map__next(machine__kernel_map(machine));
1512 if (map)
1513 machine__set_kernel_mmap(machine, start, map->start);
1514 }
1515
1516 out_put:
1517 dso__put(kernel);
1518 return ret;
1519 }
1520
1521 static bool machine__uses_kcore(struct machine *machine)
1522 {
1523 struct dso *dso;
1524
1525 list_for_each_entry(dso, &machine->dsos.head, node) {
1526 if (dso__is_kcore(dso))
1527 return true;
1528 }
1529
1530 return false;
1531 }
1532
1533 static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
1534 union perf_event *event)
1535 {
1536 return machine__is(machine, "x86_64") &&
1537 is_entry_trampoline(event->mmap.filename);
1538 }
1539
1540 static int machine__process_extra_kernel_map(struct machine *machine,
1541 union perf_event *event)
1542 {
1543 struct dso *kernel = machine__kernel_dso(machine);
1544 struct extra_kernel_map xm = {
1545 .start = event->mmap.start,
1546 .end = event->mmap.start + event->mmap.len,
1547 .pgoff = event->mmap.pgoff,
1548 };
1549
1550 if (kernel == NULL)
1551 return -1;
1552
1553 strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);
1554
1555 return machine__create_extra_kernel_map(machine, kernel, &xm);
1556 }
1557
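/*
 * Handle a kernel-space MMAP event: module paths and non-kernel '[...]'
 * names become module maps, the main kernel mmap (matching
 * machine->mmap_name) finds or creates the kernel DSO and (re)creates the
 * kernel map, and x86_64 entry trampoline mmaps become extra kernel maps.
 */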
1558 static int machine__process_kernel_mmap_event(struct machine *machine,
1559 union perf_event *event)
1560 {
1561 struct map *map;
1562 enum dso_kernel_type kernel_type;
1563 bool is_kernel_mmap;
1564
1565 /* If we have maps from kcore then we do not need or want any others */
1566 if (machine__uses_kcore(machine))
1567 return 0;
1568
1569 if (machine__is_host(machine))
1570 kernel_type = DSO_TYPE_KERNEL;
1571 else
1572 kernel_type = DSO_TYPE_GUEST_KERNEL;
1573
1574 is_kernel_mmap = memcmp(event->mmap.filename,
1575 machine->mmap_name,
1576 strlen(machine->mmap_name) - 1) == 0;
1577 if (event->mmap.filename[0] == '/' ||
1578 (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
1579 map = machine__addnew_module_map(machine, event->mmap.start,
1580 event->mmap.filename);
1581 if (map == NULL)
1582 goto out_problem;
1583
1584 map->end = map->start + event->mmap.len;
1585 } else if (is_kernel_mmap) {
1586 const char *symbol_name = (event->mmap.filename +
1587 strlen(machine->mmap_name));
1588 /*
1589 * Should be there already, from the build-id table in
1590 * the header.
1591 */
1592 struct dso *kernel = NULL;
1593 struct dso *dso;
1594
1595 down_read(&machine->dsos.lock);
1596
1597 list_for_each_entry(dso, &machine->dsos.head, node) {
1598
1599 /*
1600 * The cpumode passed to is_kernel_module is not the
1601 * cpumode of *this* event. If we insist on passing
1602 * correct cpumode to is_kernel_module, we should
1603 * record the cpumode when we adding this dso to the
1604 * linked list.
1605 *
1606 * However we don't really need passing correct
1607 * cpumode. We know the correct cpumode must be kernel
1608 * mode (if not, we should not link it onto kernel_dsos
1609 * list).
1610 *
1611 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
1612 * is_kernel_module() treats it as a kernel cpumode.
1613 */
1614
1615 if (!dso->kernel ||
1616 is_kernel_module(dso->long_name,
1617 PERF_RECORD_MISC_CPUMODE_UNKNOWN))
1618 continue;
1619
1620
1621 kernel = dso;
1622 break;
1623 }
1624
1625 up_read(&machine->dsos.lock);
1626
1627 if (kernel == NULL)
1628 kernel = machine__findnew_dso(machine, machine->mmap_name);
1629 if (kernel == NULL)
1630 goto out_problem;
1631
1632 kernel->kernel = kernel_type;
1633 if (__machine__create_kernel_maps(machine, kernel) < 0) {
1634 dso__put(kernel);
1635 goto out_problem;
1636 }
1637
1638 if (strstr(kernel->long_name, "vmlinux"))
1639 dso__set_short_name(kernel, "[kernel.vmlinux]", false);
1640
1641 machine__update_kernel_mmap(machine, event->mmap.start,
1642 event->mmap.start + event->mmap.len);
1643
1644 /*
1645 * Avoid using a zero address (kptr_restrict) for the ref reloc
1646 * symbol. Effectively having zero here means that at record
1647 * time /proc/sys/kernel/kptr_restrict was non zero.
1648 */
1649 if (event->mmap.pgoff != 0) {
1650 map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
1651 symbol_name,
1652 event->mmap.pgoff);
1653 }
1654
1655 if (machine__is_default_guest(machine)) {
1656 /*
1657 * preload dso of guest kernel and modules
1658 */
1659 dso__load(kernel, machine__kernel_map(machine));
1660 }
1661 } else if (perf_event__is_extra_kernel_mmap(machine, event)) {
1662 return machine__process_extra_kernel_map(machine, event);
1663 }
1664 return 0;
1665 out_problem:
1666 return -1;
1667 }
1668
1669 int machine__process_mmap2_event(struct machine *machine,
1670 union perf_event *event,
1671 struct perf_sample *sample)
1672 {
1673 struct thread *thread;
1674 struct map *map;
1675 struct dso_id dso_id = {
1676 .maj = event->mmap2.maj,
1677 .min = event->mmap2.min,
1678 .ino = event->mmap2.ino,
1679 .ino_generation = event->mmap2.ino_generation,
1680 };
1681 int ret = 0;
1682
1683 if (dump_trace)
1684 perf_event__fprintf_mmap2(event, stdout);
1685
1686 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1687 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1688 ret = machine__process_kernel_mmap_event(machine, event);
1689 if (ret < 0)
1690 goto out_problem;
1691 return 0;
1692 }
1693
1694 thread = machine__findnew_thread(machine, event->mmap2.pid,
1695 event->mmap2.tid);
1696 if (thread == NULL)
1697 goto out_problem;
1698
1699 map = map__new(machine, event->mmap2.start,
1700 event->mmap2.len, event->mmap2.pgoff,
1701 &dso_id, event->mmap2.prot,
1702 event->mmap2.flags,
1703 event->mmap2.filename, thread);
1704
1705 if (map == NULL)
1706 goto out_problem_map;
1707
1708 ret = thread__insert_map(thread, map);
1709 if (ret)
1710 goto out_problem_insert;
1711
1712 thread__put(thread);
1713 map__put(map);
1714 return 0;
1715
1716 out_problem_insert:
1717 map__put(map);
1718 out_problem_map:
1719 thread__put(thread);
1720 out_problem:
1721 dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
1722 return 0;
1723 }
1724
1725 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
1726 struct perf_sample *sample)
1727 {
1728 struct thread *thread;
1729 struct map *map;
1730 u32 prot = 0;
1731 int ret = 0;
1732
1733 if (dump_trace)
1734 perf_event__fprintf_mmap(event, stdout);
1735
1736 if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
1737 sample->cpumode == PERF_RECORD_MISC_KERNEL) {
1738 ret = machine__process_kernel_mmap_event(machine, event);
1739 if (ret < 0)
1740 goto out_problem;
1741 return 0;
1742 }
1743
1744 thread = machine__findnew_thread(machine, event->mmap.pid,
1745 event->mmap.tid);
1746 if (thread == NULL)
1747 goto out_problem;
1748
1749 if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
1750 prot = PROT_EXEC;
1751
1752 map = map__new(machine, event->mmap.start,
1753 event->mmap.len, event->mmap.pgoff,
1754 NULL, prot, 0, event->mmap.filename, thread);
1755
1756 if (map == NULL)
1757 goto out_problem_map;
1758
1759 ret = thread__insert_map(thread, map);
1760 if (ret)
1761 goto out_problem_insert;
1762
1763 thread__put(thread);
1764 map__put(map);
1765 return 0;
1766
1767 out_problem_insert:
1768 map__put(map);
1769 out_problem_map:
1770 thread__put(thread);
1771 out_problem:
1772 dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
1773 return 0;
1774 }
1775
1776 static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
1777 {
1778 struct threads *threads = machine__threads(machine, th->tid);
1779
1780 if (threads->last_match == th)
1781 threads__set_last_match(threads, NULL);
1782
1783 if (lock)
1784 down_write(&threads->lock);
1785
1786 BUG_ON(refcount_read(&th->refcnt) == 0);
1787
1788 rb_erase_cached(&th->rb_node, &threads->entries);
1789 RB_CLEAR_NODE(&th->rb_node);
1790 --threads->nr;
1791 /*
1792 	 * Move it first to the dead_threads list, then drop the reference;
1793 	 * if this is the last reference, the thread__delete destructor
1794 * will be called and we will remove it from the dead_threads list.
1795 */
1796 list_add_tail(&th->node, &threads->dead);
1797
1798 /*
1799 * We need to do the put here because if this is the last refcount,
1800 * then we will be touching the threads->dead head when removing the
1801 * thread.
1802 */
1803 thread__put(th);
1804
1805 if (lock)
1806 up_write(&threads->lock);
1807 }
1808
1809 void machine__remove_thread(struct machine *machine, struct thread *th)
1810 {
1811 return __machine__remove_thread(machine, th, true);
1812 }
1813
1814 int machine__process_fork_event(struct machine *machine, union perf_event *event,
1815 struct perf_sample *sample)
1816 {
1817 struct thread *thread = machine__find_thread(machine,
1818 event->fork.pid,
1819 event->fork.tid);
1820 struct thread *parent = machine__findnew_thread(machine,
1821 event->fork.ppid,
1822 event->fork.ptid);
1823 bool do_maps_clone = true;
1824 int err = 0;
1825
1826 if (dump_trace)
1827 perf_event__fprintf_task(event, stdout);
1828
1829 /*
1830 * There may be an existing thread that is not actually the parent,
1831 * either because we are processing events out of order, or because the
1832 * (fork) event that would have removed the thread was lost. Assume the
1833 * latter case and continue on as best we can.
1834 */
1835 if (parent->pid_ != (pid_t)event->fork.ppid) {
1836 dump_printf("removing erroneous parent thread %d/%d\n",
1837 parent->pid_, parent->tid);
1838 machine__remove_thread(machine, parent);
1839 thread__put(parent);
1840 parent = machine__findnew_thread(machine, event->fork.ppid,
1841 event->fork.ptid);
1842 }
1843
1844 /* if a thread currently exists for the thread id remove it */
1845 if (thread != NULL) {
1846 machine__remove_thread(machine, thread);
1847 thread__put(thread);
1848 }
1849
1850 thread = machine__findnew_thread(machine, event->fork.pid,
1851 event->fork.tid);
1852 /*
1853 * When synthesizing FORK events, we are trying to create thread
1854 * objects for the already running tasks on the machine.
1855 *
1856 * Normally, for a kernel FORK event, we want to clone the parent's
1857 * maps because that is what the kernel just did.
1858 *
1859 * But when synthesizing, this should not be done. If we do, we end up
1860 	 * with overlapping maps as we process the synthesized MMAP2 events that
1861 * get delivered shortly thereafter.
1862 *
1863 * Use the FORK event misc flags in an internal way to signal this
1864 * situation, so we can elide the map clone when appropriate.
1865 */
1866 if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
1867 do_maps_clone = false;
1868
1869 if (thread == NULL || parent == NULL ||
1870 thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
1871 dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
1872 err = -1;
1873 }
1874 thread__put(thread);
1875 thread__put(parent);
1876
1877 return err;
1878 }
1879
1880 int machine__process_exit_event(struct machine *machine, union perf_event *event,
1881 struct perf_sample *sample __maybe_unused)
1882 {
1883 struct thread *thread = machine__find_thread(machine,
1884 event->fork.pid,
1885 event->fork.tid);
1886
1887 if (dump_trace)
1888 perf_event__fprintf_task(event, stdout);
1889
1890 if (thread != NULL) {
1891 thread__exited(thread);
1892 thread__put(thread);
1893 }
1894
1895 return 0;
1896 }
1897
1898 int machine__process_event(struct machine *machine, union perf_event *event,
1899 struct perf_sample *sample)
1900 {
1901 int ret;
1902
1903 switch (event->header.type) {
1904 case PERF_RECORD_COMM:
1905 ret = machine__process_comm_event(machine, event, sample); break;
1906 case PERF_RECORD_MMAP:
1907 ret = machine__process_mmap_event(machine, event, sample); break;
1908 case PERF_RECORD_NAMESPACES:
1909 ret = machine__process_namespaces_event(machine, event, sample); break;
1910 case PERF_RECORD_CGROUP:
1911 ret = machine__process_cgroup_event(machine, event, sample); break;
1912 case PERF_RECORD_MMAP2:
1913 ret = machine__process_mmap2_event(machine, event, sample); break;
1914 case PERF_RECORD_FORK:
1915 ret = machine__process_fork_event(machine, event, sample); break;
1916 case PERF_RECORD_EXIT:
1917 ret = machine__process_exit_event(machine, event, sample); break;
1918 case PERF_RECORD_LOST:
1919 ret = machine__process_lost_event(machine, event, sample); break;
1920 case PERF_RECORD_AUX:
1921 ret = machine__process_aux_event(machine, event); break;
1922 case PERF_RECORD_ITRACE_START:
1923 ret = machine__process_itrace_start_event(machine, event); break;
1924 case PERF_RECORD_LOST_SAMPLES:
1925 ret = machine__process_lost_samples_event(machine, event, sample); break;
1926 case PERF_RECORD_SWITCH:
1927 case PERF_RECORD_SWITCH_CPU_WIDE:
1928 ret = machine__process_switch_event(machine, event); break;
1929 case PERF_RECORD_KSYMBOL:
1930 ret = machine__process_ksymbol(machine, event, sample); break;
1931 case PERF_RECORD_BPF_EVENT:
1932 ret = machine__process_bpf(machine, event, sample); break;
1933 default:
1934 ret = -1;
1935 break;
1936 }
1937
1938 return ret;
1939 }
1940
1941 static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
1942 {
1943 if (!regexec(regex, sym->name, 0, NULL, 0))
1944 return 1;
1945 return 0;
1946 }
1947
1948 static void ip__resolve_ams(struct thread *thread,
1949 struct addr_map_symbol *ams,
1950 u64 ip)
1951 {
1952 struct addr_location al;
1953
1954 memset(&al, 0, sizeof(al));
1955 /*
1956 * We cannot use the header.misc hint to determine whether a
1957 * branch stack address is user, kernel, guest, hypervisor.
1958 * Branches may straddle the kernel/user/hypervisor boundaries.
1959 * Thus, we have to try consecutively until we find a match
1960 * or else, the symbol is unknown
1961 */
1962 thread__find_cpumode_addr_location(thread, ip, &al);
1963
1964 ams->addr = ip;
1965 ams->al_addr = al.addr;
1966 ams->ms.maps = al.maps;
1967 ams->ms.sym = al.sym;
1968 ams->ms.map = al.map;
1969 ams->phys_addr = 0;
1970 }
1971
1972 static void ip__resolve_data(struct thread *thread,
1973 u8 m, struct addr_map_symbol *ams,
1974 u64 addr, u64 phys_addr)
1975 {
1976 struct addr_location al;
1977
1978 memset(&al, 0, sizeof(al));
1979
1980 thread__find_symbol(thread, m, addr, &al);
1981
1982 ams->addr = addr;
1983 ams->al_addr = al.addr;
1984 ams->ms.maps = al.maps;
1985 ams->ms.sym = al.sym;
1986 ams->ms.map = al.map;
1987 ams->phys_addr = phys_addr;
1988 }
1989
1990 struct mem_info *sample__resolve_mem(struct perf_sample *sample,
1991 struct addr_location *al)
1992 {
1993 struct mem_info *mi = mem_info__new();
1994
1995 if (!mi)
1996 return NULL;
1997
1998 ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
1999 ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
2000 sample->addr, sample->phys_addr);
2001 mi->data_src.val = sample->data_src;
2002
2003 return mi;
2004 }
2005
2006 static char *callchain_srcline(struct map_symbol *ms, u64 ip)
2007 {
2008 struct map *map = ms->map;
2009 char *srcline = NULL;
2010
2011 if (!map || callchain_param.key == CCKEY_FUNCTION)
2012 return srcline;
2013
2014 srcline = srcline__tree_find(&map->dso->srclines, ip);
2015 if (!srcline) {
2016 bool show_sym = false;
2017 bool show_addr = callchain_param.key == CCKEY_ADDRESS;
2018
2019 srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
2020 ms->sym, show_sym, show_addr, ip);
2021 srcline__tree_insert(&map->dso->srclines, ip, srcline);
2022 }
2023
2024 return srcline;
2025 }
2026
2027 struct iterations {
2028 int nr_loop_iter;
2029 u64 cycles;
2030 };
2031
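/*
 * Resolve one callchain entry to a map/symbol and append it to the cursor.
 * PERF_CONTEXT_* marker values are not appended; they only switch the
 * cpumode used to resolve the entries that follow them.
 */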
2032 static int add_callchain_ip(struct thread *thread,
2033 struct callchain_cursor *cursor,
2034 struct symbol **parent,
2035 struct addr_location *root_al,
2036 u8 *cpumode,
2037 u64 ip,
2038 bool branch,
2039 struct branch_flags *flags,
2040 struct iterations *iter,
2041 u64 branch_from)
2042 {
2043 struct map_symbol ms;
2044 struct addr_location al;
2045 int nr_loop_iter = 0;
2046 u64 iter_cycles = 0;
2047 const char *srcline = NULL;
2048
2049 al.filtered = 0;
2050 al.sym = NULL;
2051 if (!cpumode) {
2052 thread__find_cpumode_addr_location(thread, ip, &al);
2053 } else {
2054 if (ip >= PERF_CONTEXT_MAX) {
2055 switch (ip) {
2056 case PERF_CONTEXT_HV:
2057 *cpumode = PERF_RECORD_MISC_HYPERVISOR;
2058 break;
2059 case PERF_CONTEXT_KERNEL:
2060 *cpumode = PERF_RECORD_MISC_KERNEL;
2061 break;
2062 case PERF_CONTEXT_USER:
2063 *cpumode = PERF_RECORD_MISC_USER;
2064 break;
2065 default:
2066 pr_debug("invalid callchain context: "
2067 "%"PRId64"\n", (s64) ip);
2068 /*
2069 * It seems the callchain is corrupted.
2070 * Discard all.
2071 */
2072 callchain_cursor_reset(cursor);
2073 return 1;
2074 }
2075 return 0;
2076 }
2077 thread__find_symbol(thread, *cpumode, ip, &al);
2078 }
2079
2080 if (al.sym != NULL) {
2081 if (perf_hpp_list.parent && !*parent &&
2082 symbol__match_regex(al.sym, &parent_regex))
2083 *parent = al.sym;
2084 else if (have_ignore_callees && root_al &&
2085 symbol__match_regex(al.sym, &ignore_callees_regex)) {
2086 /* Treat this symbol as the root,
2087 forgetting its callees. */
2088 *root_al = al;
2089 callchain_cursor_reset(cursor);
2090 }
2091 }
2092
2093 if (symbol_conf.hide_unresolved && al.sym == NULL)
2094 return 0;
2095
2096 if (iter) {
2097 nr_loop_iter = iter->nr_loop_iter;
2098 iter_cycles = iter->cycles;
2099 }
2100
2101 ms.maps = al.maps;
2102 ms.map = al.map;
2103 ms.sym = al.sym;
2104 srcline = callchain_srcline(&ms, al.addr);
2105 return callchain_cursor_append(cursor, ip, &ms,
2106 branch, flags, nr_loop_iter,
2107 iter_cycles, branch_from, srcline);
2108 }
2109
2110 struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
2111 struct addr_location *al)
2112 {
2113 unsigned int i;
2114 const struct branch_stack *bs = sample->branch_stack;
2115 struct branch_entry *entries = perf_sample__branch_entries(sample);
2116 struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));
2117
2118 if (!bi)
2119 return NULL;
2120
2121 for (i = 0; i < bs->nr; i++) {
2122 ip__resolve_ams(al->thread, &bi[i].to, entries[i].to);
2123 ip__resolve_ams(al->thread, &bi[i].from, entries[i].from);
2124 bi[i].flags = entries[i].flags;
2125 }
2126 return bi;
2127 }
2128
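/*
 * Account one collapsed loop iteration: bump the iteration count and
 * recompute the cycle total over the 'nr' branch entries that made up
 * the iteration being removed.
 */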
2129 static void save_iterations(struct iterations *iter,
2130 struct branch_entry *be, int nr)
2131 {
2132 int i;
2133
2134 iter->nr_loop_iter++;
2135 iter->cycles = 0;
2136
2137 for (i = 0; i < nr; i++)
2138 iter->cycles += be[i].flags.cycles;
2139 }
2140
2141 #define CHASHSZ 127
2142 #define CHASHBITS 7
2143 #define NO_ENTRY 0xff
2144
2145 #define PERF_MAX_BRANCH_DEPTH 127
2146
2147 /* Remove loops. */
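/*
 * Loop detection works on the branch 'from' addresses: each address is
 * hashed into a small table (no collision handling), and when the same
 * 'from' address is seen again the entries in between are compared. If
 * they repeat exactly, the duplicated iteration is dropped from the
 * branch stack and accounted via save_iterations(), so tight loops do
 * not flood the callchain with identical frames.
 */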
2148 static int remove_loops(struct branch_entry *l, int nr,
2149 struct iterations *iter)
2150 {
2151 int i, j, off;
2152 unsigned char chash[CHASHSZ];
2153
2154 memset(chash, NO_ENTRY, sizeof(chash));
2155
2156 BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);
2157
2158 for (i = 0; i < nr; i++) {
2159 int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;
2160
2161 /* no collision handling for now */
2162 if (chash[h] == NO_ENTRY) {
2163 chash[h] = i;
2164 } else if (l[chash[h]].from == l[i].from) {
2165 bool is_loop = true;
2166 /* check if it is a real loop */
2167 off = 0;
2168 for (j = chash[h]; j < i && i + off < nr; j++, off++)
2169 if (l[j].from != l[i + off].from) {
2170 is_loop = false;
2171 break;
2172 }
2173 if (is_loop) {
2174 j = nr - (i + off);
2175 if (j > 0) {
2176 save_iterations(iter + i + off,
2177 l + i, off);
2178
2179 memmove(iter + i, iter + i + off,
2180 j * sizeof(*iter));
2181
2182 memmove(l + i, l + i + off,
2183 j * sizeof(*l));
2184 }
2185
2186 nr -= off;
2187 }
2188 }
2189 }
2190 return nr;
2191 }
2192
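/*
 * Add the kernel entries of the sampled callchain (indices 0..end) to
 * the cursor. They are walked forwards for callee order and backwards
 * otherwise, so the cursor ends up in the requested order.
 * PERF_CONTEXT_* markers are consumed by add_callchain_ip() rather than
 * appended.
 */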
2193 static int lbr_callchain_add_kernel_ip(struct thread *thread,
2194 struct callchain_cursor *cursor,
2195 struct perf_sample *sample,
2196 struct symbol **parent,
2197 struct addr_location *root_al,
2198 u64 branch_from,
2199 bool callee, int end)
2200 {
2201 struct ip_callchain *chain = sample->callchain;
2202 u8 cpumode = PERF_RECORD_MISC_USER;
2203 int err, i;
2204
2205 if (callee) {
2206 for (i = 0; i < end + 1; i++) {
2207 err = add_callchain_ip(thread, cursor, parent,
2208 root_al, &cpumode, chain->ips[i],
2209 false, NULL, NULL, branch_from);
2210 if (err)
2211 return err;
2212 }
2213 return 0;
2214 }
2215
2216 for (i = end; i >= 0; i--) {
2217 err = add_callchain_ip(thread, cursor, parent,
2218 root_al, &cpumode, chain->ips[i],
2219 false, NULL, NULL, branch_from);
2220 if (err)
2221 return err;
2222 }
2223
2224 return 0;
2225 }
2226
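/*
 * Remember the cursor node that was just appended for LBR entry 'idx'
 * in lbr_stitch->prev_lbr_cursor[], so it can be replayed ("stitched")
 * when resolving the next sample. If the cursor did not grow (nothing
 * was appended for this entry), mark the slot invalid instead.
 */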
2227 static void save_lbr_cursor_node(struct thread *thread,
2228 struct callchain_cursor *cursor,
2229 int idx)
2230 {
2231 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2232
2233 if (!lbr_stitch)
2234 return;
2235
2236 if (cursor->pos == cursor->nr) {
2237 lbr_stitch->prev_lbr_cursor[idx].valid = false;
2238 return;
2239 }
2240
2241 if (!cursor->curr)
2242 cursor->curr = cursor->first;
2243 else
2244 cursor->curr = cursor->curr->next;
2245 memcpy(&lbr_stitch->prev_lbr_cursor[idx], cursor->curr,
2246 sizeof(struct callchain_cursor_node));
2247
2248 lbr_stitch->prev_lbr_cursor[idx].valid = true;
2249 cursor->pos++;
2250 }
2251
2252 static int lbr_callchain_add_lbr_ip(struct thread *thread,
2253 struct callchain_cursor *cursor,
2254 struct perf_sample *sample,
2255 struct symbol **parent,
2256 struct addr_location *root_al,
2257 u64 *branch_from,
2258 bool callee)
2259 {
2260 struct branch_stack *lbr_stack = sample->branch_stack;
2261 struct branch_entry *entries = perf_sample__branch_entries(sample);
2262 u8 cpumode = PERF_RECORD_MISC_USER;
2263 int lbr_nr = lbr_stack->nr;
2264 struct branch_flags *flags;
2265 int err, i;
2266 u64 ip;
2267
2268 /*
2269 * curr and pos are not used during a writing session. They are
2270 * cleared in callchain_cursor_commit() when the writing session is
2271 * closed. Use curr and pos here to track the current cursor node.
2272 */
2273 if (thread->lbr_stitch) {
2274 cursor->curr = NULL;
2275 cursor->pos = cursor->nr;
2276 if (cursor->nr) {
2277 cursor->curr = cursor->first;
2278 for (i = 0; i < (int)(cursor->nr - 1); i++)
2279 cursor->curr = cursor->curr->next;
2280 }
2281 }
2282
2283 if (callee) {
2284 /* Add LBR ip from first entries.to */
2285 ip = entries[0].to;
2286 flags = &entries[0].flags;
2287 *branch_from = entries[0].from;
2288 err = add_callchain_ip(thread, cursor, parent,
2289 root_al, &cpumode, ip,
2290 true, flags, NULL,
2291 *branch_from);
2292 if (err)
2293 return err;
2294
2295 /*
2296 * The number of cursor nodes increases.
2297 * Move the current cursor node, but there is no need to save
2298 * the cursor node for entry 0: it is impossible to stitch the
2299 * whole LBRs of the previous sample.
2300 */
2301 if (thread->lbr_stitch && (cursor->pos != cursor->nr)) {
2302 if (!cursor->curr)
2303 cursor->curr = cursor->first;
2304 else
2305 cursor->curr = cursor->curr->next;
2306 cursor->pos++;
2307 }
2308
2309 /* Add LBR ip from entries.from one by one. */
2310 for (i = 0; i < lbr_nr; i++) {
2311 ip = entries[i].from;
2312 flags = &entries[i].flags;
2313 err = add_callchain_ip(thread, cursor, parent,
2314 root_al, &cpumode, ip,
2315 true, flags, NULL,
2316 *branch_from);
2317 if (err)
2318 return err;
2319 save_lbr_cursor_node(thread, cursor, i);
2320 }
2321 return 0;
2322 }
2323
2324 /* Add LBR ip from entries.from one by one. */
2325 for (i = lbr_nr - 1; i >= 0; i--) {
2326 ip = entries[i].from;
2327 flags = &entries[i].flags;
2328 err = add_callchain_ip(thread, cursor, parent,
2329 root_al, &cpumode, ip,
2330 true, flags, NULL,
2331 *branch_from);
2332 if (err)
2333 return err;
2334 save_lbr_cursor_node(thread, cursor, i);
2335 }
2336
2337 /* Add LBR ip from first entries.to */
2338 ip = entries[0].to;
2339 flags = &entries[0].flags;
2340 *branch_from = entries[0].from;
2341 err = add_callchain_ip(thread, cursor, parent,
2342 root_al, &cpumode, ip,
2343 true, flags, NULL,
2344 *branch_from);
2345 if (err)
2346 return err;
2347
2348 return 0;
2349 }
2350
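/*
 * Replay the cursor nodes saved from the previous sample
 * (lbr_stitch->lists) onto the current cursor, extending the LBR
 * callchain beyond what this sample's LBR stack can hold.
 */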
2351 static int lbr_callchain_add_stitched_lbr_ip(struct thread *thread,
2352 struct callchain_cursor *cursor)
2353 {
2354 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2355 struct callchain_cursor_node *cnode;
2356 struct stitch_list *stitch_node;
2357 int err;
2358
2359 list_for_each_entry(stitch_node, &lbr_stitch->lists, node) {
2360 cnode = &stitch_node->cursor;
2361
2362 err = callchain_cursor_append(cursor, cnode->ip,
2363 &cnode->ms,
2364 cnode->branch,
2365 &cnode->branch_flags,
2366 cnode->nr_loop_iter,
2367 cnode->iter_cycles,
2368 cnode->branch_from,
2369 cnode->srcline);
2370 if (err)
2371 return err;
2372 }
2373 return 0;
2374 }
2375
2376 static struct stitch_list *get_stitch_node(struct thread *thread)
2377 {
2378 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2379 struct stitch_list *stitch_node;
2380
2381 if (!list_empty(&lbr_stitch->free_lists)) {
2382 stitch_node = list_first_entry(&lbr_stitch->free_lists,
2383 struct stitch_list, node);
2384 list_del(&stitch_node->node);
2385
2386 return stitch_node;
2387 }
2388
2389 return malloc(sizeof(struct stitch_list));
2390 }
2391
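/*
 * Decide whether the previous sample's LBRs can be stitched onto the
 * current one. Using hw_idx to line up physical LBR registers, the
 * overlapping entries of both samples must be identical; if so, the
 * older entries of the previous sample are queued on lbr_stitch->lists
 * (ordered to match the requested callchain order) for later replay.
 */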
2392 static bool has_stitched_lbr(struct thread *thread,
2393 struct perf_sample *cur,
2394 struct perf_sample *prev,
2395 unsigned int max_lbr,
2396 bool callee)
2397 {
2398 struct branch_stack *cur_stack = cur->branch_stack;
2399 struct branch_entry *cur_entries = perf_sample__branch_entries(cur);
2400 struct branch_stack *prev_stack = prev->branch_stack;
2401 struct branch_entry *prev_entries = perf_sample__branch_entries(prev);
2402 struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
2403 int i, j, nr_identical_branches = 0;
2404 struct stitch_list *stitch_node;
2405 u64 cur_base, distance;
2406
2407 if (!cur_stack || !prev_stack)
2408 return false;
2409
2410 /* Find the physical index of the base-of-stack for current sample. */
2411 cur_base = max_lbr - cur_stack->nr + cur_stack->hw_idx + 1;
2412
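/*
 * The physical LBR registers are treated as a ring of max_lbr entries
 * here, so the distance from the current base to the previous sample's
 * top-of-stack may wrap around max_lbr.
 */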
2413 distance = (prev_stack->hw_idx > cur_base) ? (prev_stack->hw_idx - cur_base) :
2414 (max_lbr + prev_stack->hw_idx - cur_base);
2415 /* Previous sample has shorter stack. Nothing can be stitched. */
2416 if (distance + 1 > prev_stack->nr)
2417 return false;
2418
2419 /*
2420 * Check if there are identical LBRs between two samples.
2421 * Identical LBRs must have the same from, to and flags values. Also,
2422 * they have to be saved in the same LBR registers (same physical
2423 * index).
2424 *
2425 * Start from the base-of-stack of the current sample.
2426 */
2427 for (i = distance, j = cur_stack->nr - 1; (i >= 0) && (j >= 0); i--, j--) {
2428 if ((prev_entries[i].from != cur_entries[j].from) ||
2429 (prev_entries[i].to != cur_entries[j].to) ||
2430 (prev_entries[i].flags.value != cur_entries[j].flags.value))
2431 break;
2432 nr_identical_branches++;
2433 }
2434
2435 if (!nr_identical_branches)
2436 return false;
2437
2438 /*
2439 * Save the LBRs between the base-of-stack of previous sample
2440 * and the base-of-stack of current sample into lbr_stitch->lists.
2441 * These LBRs will be stitched later.
2442 */
2443 for (i = prev_stack->nr - 1; i > (int)distance; i--) {
2444
2445 if (!lbr_stitch->prev_lbr_cursor[i].valid)
2446 continue;
2447
2448 stitch_node = get_stitch_node(thread);
2449 if (!stitch_node)
2450 return false;
2451
2452 memcpy(&stitch_node->cursor, &lbr_stitch->prev_lbr_cursor[i],
2453 sizeof(struct callchain_cursor_node));
2454
2455 if (callee)
2456 list_add(&stitch_node->node, &lbr_stitch->lists);
2457 else
2458 list_add_tail(&stitch_node->node, &lbr_stitch->lists);
2459 }
2460
2461 return true;
2462 }
2463
2464 static bool alloc_lbr_stitch(struct thread *thread, unsigned int max_lbr)
2465 {
2466 if (thread->lbr_stitch)
2467 return true;
2468
2469 thread->lbr_stitch = zalloc(sizeof(*thread->lbr_stitch));
2470 if (!thread->lbr_stitch)
2471 goto err;
2472
2473 thread->lbr_stitch->prev_lbr_cursor = calloc(max_lbr + 1, sizeof(struct callchain_cursor_node));
2474 if (!thread->lbr_stitch->prev_lbr_cursor)
2475 goto free_lbr_stitch;
2476
2477 INIT_LIST_HEAD(&thread->lbr_stitch->lists);
2478 INIT_LIST_HEAD(&thread->lbr_stitch->free_lists);
2479
2480 return true;
2481
2482 free_lbr_stitch:
2483 zfree(&thread->lbr_stitch);
2484 err:
2485 pr_warning("Failed to allocate space for stitched LBRs. Disable LBR stitch\n");
2486 thread->lbr_stitch_enable = false;
2487 return false;
2488 }
2489
2490 /*
2491 * Resolve LBR callstack chain sample
2492 * Return:
2493 * 1 on success: LBR callchain information was resolved
2494 * 0 when no LBR callchain information is available; the caller should try fp
2495 * negative error code on other errors.
2496 */
2497 static int resolve_lbr_callchain_sample(struct thread *thread,
2498 struct callchain_cursor *cursor,
2499 struct perf_sample *sample,
2500 struct symbol **parent,
2501 struct addr_location *root_al,
2502 int max_stack,
2503 unsigned int max_lbr)
2504 {
2505 bool callee = (callchain_param.order == ORDER_CALLEE);
2506 struct ip_callchain *chain = sample->callchain;
2507 int chain_nr = min(max_stack, (int)chain->nr), i;
2508 struct lbr_stitch *lbr_stitch;
2509 bool stitched_lbr = false;
2510 u64 branch_from = 0;
2511 int err;
2512
2513 for (i = 0; i < chain_nr; i++) {
2514 if (chain->ips[i] == PERF_CONTEXT_USER)
2515 break;
2516 }
2517
2518 /* LBR only affects the user callchain */
2519 if (i == chain_nr)
2520 return 0;
2521
2522 if (thread->lbr_stitch_enable && !sample->no_hw_idx &&
2523 (max_lbr > 0) && alloc_lbr_stitch(thread, max_lbr)) {
2524 lbr_stitch = thread->lbr_stitch;
2525
2526 stitched_lbr = has_stitched_lbr(thread, sample,
2527 &lbr_stitch->prev_sample,
2528 max_lbr, callee);
2529
2530 if (!stitched_lbr && !list_empty(&lbr_stitch->lists)) {
2531 list_replace_init(&lbr_stitch->lists,
2532 &lbr_stitch->free_lists);
2533 }
2534 memcpy(&lbr_stitch->prev_sample, sample, sizeof(*sample));
2535 }
2536
2537 if (callee) {
2538 /* Add kernel ip */
2539 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2540 parent, root_al, branch_from,
2541 true, i);
2542 if (err)
2543 goto error;
2544
2545 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2546 root_al, &branch_from, true);
2547 if (err)
2548 goto error;
2549
2550 if (stitched_lbr) {
2551 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2552 if (err)
2553 goto error;
2554 }
2555
2556 } else {
2557 if (stitched_lbr) {
2558 err = lbr_callchain_add_stitched_lbr_ip(thread, cursor);
2559 if (err)
2560 goto error;
2561 }
2562 err = lbr_callchain_add_lbr_ip(thread, cursor, sample, parent,
2563 root_al, &branch_from, false);
2564 if (err)
2565 goto error;
2566
2567 /* Add kernel ip */
2568 err = lbr_callchain_add_kernel_ip(thread, cursor, sample,
2569 parent, root_al, branch_from,
2570 false, i);
2571 if (err)
2572 goto error;
2573 }
2574 return 1;
2575
2576 error:
2577 return (err < 0) ? err : 0;
2578 }
2579
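/*
 * Walk back from index 'ent' to the nearest PERF_CONTEXT_* marker and
 * pass it to add_callchain_ip(), which updates *cpumode to the context
 * in effect at that point of the chain.
 */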
2580 static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
2581 struct callchain_cursor *cursor,
2582 struct symbol **parent,
2583 struct addr_location *root_al,
2584 u8 *cpumode, int ent)
2585 {
2586 int err = 0;
2587
2588 while (--ent >= 0) {
2589 u64 ip = chain->ips[ent];
2590
2591 if (ip >= PERF_CONTEXT_MAX) {
2592 err = add_callchain_ip(thread, cursor, parent,
2593 root_al, cpumode, ip,
2594 false, NULL, NULL, 0);
2595 break;
2596 }
2597 }
2598 return err;
2599 }
2600
2601 static int thread__resolve_callchain_sample(struct thread *thread,
2602 struct callchain_cursor *cursor,
2603 struct evsel *evsel,
2604 struct perf_sample *sample,
2605 struct symbol **parent,
2606 struct addr_location *root_al,
2607 int max_stack)
2608 {
2609 struct branch_stack *branch = sample->branch_stack;
2610 struct branch_entry *entries = perf_sample__branch_entries(sample);
2611 struct ip_callchain *chain = sample->callchain;
2612 int chain_nr = 0;
2613 u8 cpumode = PERF_RECORD_MISC_USER;
2614 int i, j, err, nr_entries;
2615 int skip_idx = -1;
2616 int first_call = 0;
2617
2618 if (chain)
2619 chain_nr = chain->nr;
2620
2621 if (evsel__has_branch_callstack(evsel)) {
2622 struct perf_env *env = evsel__env(evsel);
2623
2624 err = resolve_lbr_callchain_sample(thread, cursor, sample, parent,
2625 root_al, max_stack,
2626 !env ? 0 : env->max_branches);
2627 if (err)
2628 return (err < 0) ? err : 0;
2629 }
2630
2631 /*
2632 * Based on DWARF debug information, some architectures skip
2633 * a callchain entry saved by the kernel.
2634 */
2635 skip_idx = arch_skip_callchain_idx(thread, chain);
2636
2637 /*
2638 * Add branches to call stack for easier browsing. This gives
2639 * more context for a sample than just the callers.
2640 *
2641 * This uses individual histograms of paths compared to the
2642 * aggregated histograms the normal LBR mode uses.
2643 *
2644 * Limitations for now:
2645 * - No extra filters
2646 * - No annotations (should annotate somehow)
2647 */
2648
2649 if (branch && callchain_param.branch_callstack) {
2650 int nr = min(max_stack, (int)branch->nr);
2651 struct branch_entry be[nr];
2652 struct iterations iter[nr];
2653
2654 if (branch->nr > PERF_MAX_BRANCH_DEPTH) {
2655 pr_warning("corrupted branch chain. skipping...\n");
2656 goto check_calls;
2657 }
2658
2659 for (i = 0; i < nr; i++) {
2660 if (callchain_param.order == ORDER_CALLEE) {
2661 be[i] = entries[i];
2662
2663 if (chain == NULL)
2664 continue;
2665
2666 /*
2667 * Check for overlap into the callchain.
2668 * The return address in the callchain is just past the call
2669 * instruction, while the branch entry records the call's own
2670 * address. To adjust for this, assume the calling instruction
2671 * is no longer than 8 bytes.
2672 */
2673 if (i == skip_idx ||
2674 chain->ips[first_call] >= PERF_CONTEXT_MAX)
2675 first_call++;
2676 else if (be[i].from < chain->ips[first_call] &&
2677 be[i].from >= chain->ips[first_call] - 8)
2678 first_call++;
2679 } else
2680 be[i] = entries[branch->nr - i - 1];
2681 }
2682
2683 memset(iter, 0, sizeof(struct iterations) * nr);
2684 nr = remove_loops(be, nr, iter);
2685
2686 for (i = 0; i < nr; i++) {
2687 err = add_callchain_ip(thread, cursor, parent,
2688 root_al,
2689 NULL, be[i].to,
2690 true, &be[i].flags,
2691 NULL, be[i].from);
2692
2693 if (!err)
2694 err = add_callchain_ip(thread, cursor, parent, root_al,
2695 NULL, be[i].from,
2696 true, &be[i].flags,
2697 &iter[i], 0);
2698 if (err == -EINVAL)
2699 break;
2700 if (err)
2701 return err;
2702 }
2703
2704 if (chain_nr == 0)
2705 return 0;
2706
2707 chain_nr -= nr;
2708 }
2709
2710 check_calls:
2711 if (chain && callchain_param.order != ORDER_CALLEE) {
2712 err = find_prev_cpumode(chain, thread, cursor, parent, root_al,
2713 &cpumode, chain->nr - first_call);
2714 if (err)
2715 return (err < 0) ? err : 0;
2716 }
2717 for (i = first_call, nr_entries = 0;
2718 i < chain_nr && nr_entries < max_stack; i++) {
2719 u64 ip;
2720
2721 if (callchain_param.order == ORDER_CALLEE)
2722 j = i;
2723 else
2724 j = chain->nr - i - 1;
2725
2726 #ifdef HAVE_SKIP_CALLCHAIN_IDX
2727 if (j == skip_idx)
2728 continue;
2729 #endif
2730 ip = chain->ips[j];
2731 if (ip < PERF_CONTEXT_MAX)
2732 ++nr_entries;
2733 else if (callchain_param.order != ORDER_CALLEE) {
2734 err = find_prev_cpumode(chain, thread, cursor, parent,
2735 root_al, &cpumode, j);
2736 if (err)
2737 return (err < 0) ? err : 0;
2738 continue;
2739 }
2740
2741 err = add_callchain_ip(thread, cursor, parent,
2742 root_al, &cpumode, ip,
2743 false, NULL, NULL, 0);
2744
2745 if (err)
2746 return (err < 0) ? err : 0;
2747 }
2748
2749 return 0;
2750 }
2751
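/*
 * Expand 'ip' into its chain of inlined frames using the DSO's cached
 * inline tree and append one cursor node per frame. Returns 0 when at
 * least one inlined frame was appended (the caller then skips the
 * original entry), non-zero otherwise.
 */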
2752 static int append_inlines(struct callchain_cursor *cursor, struct map_symbol *ms, u64 ip)
2753 {
2754 struct symbol *sym = ms->sym;
2755 struct map *map = ms->map;
2756 struct inline_node *inline_node;
2757 struct inline_list *ilist;
2758 u64 addr;
2759 int ret = 1;
2760
2761 if (!symbol_conf.inline_name || !map || !sym)
2762 return ret;
2763
2764 addr = map__map_ip(map, ip);
2765 addr = map__rip_2objdump(map, addr);
2766
2767 inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
2768 if (!inline_node) {
2769 inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
2770 if (!inline_node)
2771 return ret;
2772 inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
2773 }
2774
2775 list_for_each_entry(ilist, &inline_node->val, list) {
2776 struct map_symbol ilist_ms = {
2777 .maps = ms->maps,
2778 .map = map,
2779 .sym = ilist->symbol,
2780 };
2781 ret = callchain_cursor_append(cursor, ip, &ilist_ms, false,
2782 NULL, 0, 0, 0, ilist->srcline);
2783
2784 if (ret != 0)
2785 return ret;
2786 }
2787
2788 return ret;
2789 }
2790
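/*
 * Callback for unwind__get_entries(): append one cursor node per
 * unwound frame, preferring the inline expansion when one is available.
 */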
2791 static int unwind_entry(struct unwind_entry *entry, void *arg)
2792 {
2793 struct callchain_cursor *cursor = arg;
2794 const char *srcline = NULL;
2795 u64 addr = entry->ip;
2796
2797 if (symbol_conf.hide_unresolved && entry->ms.sym == NULL)
2798 return 0;
2799
2800 if (append_inlines(cursor, &entry->ms, entry->ip) == 0)
2801 return 0;
2802
2803 /*
2804 * Convert entry->ip from a virtual address to an offset in
2805 * its corresponding binary.
2806 */
2807 if (entry->ms.map)
2808 addr = map__map_ip(entry->ms.map, entry->ip);
2809
2810 srcline = callchain_srcline(&entry->ms, addr);
2811 return callchain_cursor_append(cursor, entry->ip, &entry->ms,
2812 false, NULL, 0, 0, 0, srcline);
2813 }
2814
2815 static int thread__resolve_callchain_unwind(struct thread *thread,
2816 struct callchain_cursor *cursor,
2817 struct evsel *evsel,
2818 struct perf_sample *sample,
2819 int max_stack)
2820 {
2821 /* Can we do dwarf post unwind? */
2822 if (!((evsel->core.attr.sample_type & PERF_SAMPLE_REGS_USER) &&
2823 (evsel->core.attr.sample_type & PERF_SAMPLE_STACK_USER)))
2824 return 0;
2825
2826 /* Bail out if nothing was captured. */
2827 if ((!sample->user_regs.regs) ||
2828 (!sample->user_stack.size))
2829 return 0;
2830
2831 return unwind__get_entries(unwind_entry, cursor,
2832 thread, sample, max_stack);
2833 }
2834
2835 int thread__resolve_callchain(struct thread *thread,
2836 struct callchain_cursor *cursor,
2837 struct evsel *evsel,
2838 struct perf_sample *sample,
2839 struct symbol **parent,
2840 struct addr_location *root_al,
2841 int max_stack)
2842 {
2843 int ret = 0;
2844
2845 callchain_cursor_reset(cursor);
2846
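/*
 * Resolve the sampled callchain and the DWARF unwind in the order that
 * matches callchain_param.order, so the combined cursor keeps a
 * consistent callee-first or caller-first ordering.
 */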
2847 if (callchain_param.order == ORDER_CALLEE) {
2848 ret = thread__resolve_callchain_sample(thread, cursor,
2849 evsel, sample,
2850 parent, root_al,
2851 max_stack);
2852 if (ret)
2853 return ret;
2854 ret = thread__resolve_callchain_unwind(thread, cursor,
2855 evsel, sample,
2856 max_stack);
2857 } else {
2858 ret = thread__resolve_callchain_unwind(thread, cursor,
2859 evsel, sample,
2860 max_stack);
2861 if (ret)
2862 return ret;
2863 ret = thread__resolve_callchain_sample(thread, cursor,
2864 evsel, sample,
2865 parent, root_al,
2866 max_stack);
2867 }
2868
2869 return ret;
2870 }
2871
2872 int machine__for_each_thread(struct machine *machine,
2873 int (*fn)(struct thread *thread, void *p),
2874 void *priv)
2875 {
2876 struct threads *threads;
2877 struct rb_node *nd;
2878 struct thread *thread;
2879 int rc = 0;
2880 int i;
2881
2882 for (i = 0; i < THREADS__TABLE_SIZE; i++) {
2883 threads = &machine->threads[i];
2884 for (nd = rb_first_cached(&threads->entries); nd;
2885 nd = rb_next(nd)) {
2886 thread = rb_entry(nd, struct thread, rb_node);
2887 rc = fn(thread, priv);
2888 if (rc != 0)
2889 return rc;
2890 }
2891
2892 list_for_each_entry(thread, &threads->dead, node) {
2893 rc = fn(thread, priv);
2894 if (rc != 0)
2895 return rc;
2896 }
2897 }
2898 return rc;
2899 }
2900
2901 int machines__for_each_thread(struct machines *machines,
2902 int (*fn)(struct thread *thread, void *p),
2903 void *priv)
2904 {
2905 struct rb_node *nd;
2906 int rc = 0;
2907
2908 rc = machine__for_each_thread(&machines->host, fn, priv);
2909 if (rc != 0)
2910 return rc;
2911
2912 for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
2913 struct machine *machine = rb_entry(nd, struct machine, rb_node);
2914
2915 rc = machine__for_each_thread(machine, fn, priv);
2916 if (rc != 0)
2917 return rc;
2918 }
2919 return rc;
2920 }
2921
2922 pid_t machine__get_current_tid(struct machine *machine, int cpu)
2923 {
2924 int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2925
2926 if (cpu < 0 || cpu >= nr_cpus || !machine->current_tid)
2927 return -1;
2928
2929 return machine->current_tid[cpu];
2930 }
2931
2932 int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
2933 pid_t tid)
2934 {
2935 struct thread *thread;
2936 int nr_cpus = min(machine->env->nr_cpus_online, MAX_NR_CPUS);
2937
2938 if (cpu < 0)
2939 return -EINVAL;
2940
2941 if (!machine->current_tid) {
2942 int i;
2943
2944 machine->current_tid = calloc(nr_cpus, sizeof(pid_t));
2945 if (!machine->current_tid)
2946 return -ENOMEM;
2947 for (i = 0; i < nr_cpus; i++)
2948 machine->current_tid[i] = -1;
2949 }
2950
2951 if (cpu >= nr_cpus) {
2952 pr_err("Requested CPU %d too large. ", cpu);
2953 pr_err("Consider raising MAX_NR_CPUS\n");
2954 return -EINVAL;
2955 }
2956
2957 machine->current_tid[cpu] = tid;
2958
2959 thread = machine__findnew_thread(machine, pid, tid);
2960 if (!thread)
2961 return -ENOMEM;
2962
2963 thread->cpu = cpu;
2964 thread__put(thread);
2965
2966 return 0;
2967 }
2968
2969 /*
2970 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
2971 * normalized arch is needed.
2972 */
2973 bool machine__is(struct machine *machine, const char *arch)
2974 {
2975 return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
2976 }
2977
2978 int machine__nr_cpus_avail(struct machine *machine)
2979 {
2980 return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
2981 }
2982
2983 int machine__get_kernel_start(struct machine *machine)
2984 {
2985 struct map *map = machine__kernel_map(machine);
2986 int err = 0;
2987
2988 /*
2989 * The only addresses above 2^63 are kernel addresses of a 64-bit
2990 * kernel. Note that addresses are unsigned so that on a 32-bit system
2991 * all addresses including kernel addresses are less than 2^32. In
2992 * that case (32-bit system), if the kernel mapping is unknown, all
2993 * addresses will be assumed to be in user space - see
2994 * machine__kernel_ip().
2995 */
2996 machine->kernel_start = 1ULL << 63;
2997 if (map) {
2998 err = map__load(map);
2999 /*
3000 * On x86_64, PTI entry trampolines are less than the
3001 * start of kernel text, but still above 2^63. So leave
3002 * kernel_start = 1ULL << 63 for x86_64.
3003 */
3004 if (!err && !machine__is(machine, "x86_64"))
3005 machine->kernel_start = map->start;
3006 }
3007 return err;
3008 }
3009
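/*
 * When the machine keeps kernel and user code in a single address
 * space, derive the effective cpumode from the address itself via
 * machine__kernel_ip(); otherwise return the sample cpumode unchanged.
 */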
3010 u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
3011 {
3012 u8 addr_cpumode = cpumode;
3013 bool kernel_ip;
3014
3015 if (!machine->single_address_space)
3016 goto out;
3017
3018 kernel_ip = machine__kernel_ip(machine, addr);
3019 switch (cpumode) {
3020 case PERF_RECORD_MISC_KERNEL:
3021 case PERF_RECORD_MISC_USER:
3022 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
3023 PERF_RECORD_MISC_USER;
3024 break;
3025 case PERF_RECORD_MISC_GUEST_KERNEL:
3026 case PERF_RECORD_MISC_GUEST_USER:
3027 addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
3028 PERF_RECORD_MISC_GUEST_USER;
3029 break;
3030 default:
3031 break;
3032 }
3033 out:
3034 return addr_cpumode;
3035 }
3036
3037 struct dso *machine__findnew_dso_id(struct machine *machine, const char *filename, struct dso_id *id)
3038 {
3039 return dsos__findnew_id(&machine->dsos, filename, id);
3040 }
3041
3042 struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
3043 {
3044 return machine__findnew_dso_id(machine, filename, NULL);
3045 }
3046
3047 char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
3048 {
3049 struct machine *machine = vmachine;
3050 struct map *map;
3051 struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);
3052
3053 if (sym == NULL)
3054 return NULL;
3055
3056 *modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
3057 *addrp = map->unmap_ip(map, sym->start);
3058 return sym->name;
3059 }