// SPDX-License-Identifier: GPL-2.0
#include <sys/types.h>
#include "linux/hash.h"
#include "bpf-event.h"
#include <linux/ctype.h>
#include <symbol/kallsyms.h>
#include <linux/mman.h>
#include <linux/zalloc.h>
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
static void dsos__init(struct dsos *dsos)
{
	INIT_LIST_HEAD(&dsos->head);
	init_rwsem(&dsos->lock);
}
static void machine__threads_init(struct machine *machine)
{
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		threads->entries = RB_ROOT_CACHED;
		init_rwsem(&threads->lock);
		INIT_LIST_HEAD(&threads->dead);
		threads->last_match = NULL;
	}
}
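
/*
 * Note: threads are kept in THREADS__TABLE_SIZE separate rb-tree buckets,
 * each with its own rwsem; machine__threads() picks a bucket from the tid,
 * which spreads lookups across locks - presumably the intent is to reduce
 * lock contention between readers of unrelated threads.
 */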
static int machine__set_mmap_name(struct machine *machine)
{
	if (machine__is_host(machine))
		machine->mmap_name = strdup("[kernel.kallsyms]");
	else if (machine__is_default_guest(machine))
		machine->mmap_name = strdup("[guest.kernel.kallsyms]");
	else if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]",
			  machine->pid) < 0)
		machine->mmap_name = NULL;

	return machine->mmap_name ? 0 : -ENOMEM;
}
int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
{
	int err = -ENOMEM;

	memset(machine, 0, sizeof(*machine));
	map_groups__init(&machine->kmaps, machine);
	RB_CLEAR_NODE(&machine->rb_node);
	dsos__init(&machine->dsos);

	machine__threads_init(machine);

	machine->vdso_info = NULL;
	machine->env = NULL;

	machine->pid = pid;

	machine->id_hdr_size = 0;
	machine->kptr_restrict_warned = false;
	machine->comm_exec = false;
	machine->kernel_start = 0;
	machine->vmlinux_map = NULL;

	machine->root_dir = strdup(root_dir);
	if (machine->root_dir == NULL)
		return -ENOMEM;

	if (machine__set_mmap_name(machine))
		goto out;

	if (pid != HOST_KERNEL_ID) {
		struct thread *thread = machine__findnew_thread(machine, -1,
								pid);
		char comm[64];

		if (thread == NULL)
			goto out;

		snprintf(comm, sizeof(comm), "[guest/%d]", pid);
		thread__set_comm(thread, comm, 0);
		thread__put(thread);
	}

	machine->current_tid = NULL;
	err = 0;

out:
	if (err) {
		zfree(&machine->root_dir);
		zfree(&machine->mmap_name);
	}
	return err;
}
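
/*
 * Usage sketch (illustrative only): a stack-allocated host machine can be
 * set up and torn down with the init/exit pair; machine__new_host() below
 * wraps the same sequence around a heap allocation:
 *
 *	struct machine host;
 *
 *	if (machine__init(&host, "", HOST_KERNEL_ID) == 0) {
 *		... use host ...
 *		machine__exit(&host);
 *	}
 */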
struct machine *machine__new_host(void)
{
	struct machine *machine = malloc(sizeof(*machine));

	if (machine != NULL) {
		machine__init(machine, "", HOST_KERNEL_ID);

		if (machine__create_kernel_maps(machine) < 0)
			goto out_delete;
	}

	return machine;
out_delete:
	free(machine);
	return NULL;
}
struct machine *machine__new_kallsyms(void)
{
	struct machine *machine = machine__new_host();
	/*
	 * FIXME:
	 * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly
	 *    ask for not using the kcore parsing code, once this one is fixed
	 *    to create a map per module.
	 */
	if (machine && machine__load_kallsyms(machine, "/proc/kallsyms") <= 0) {
		machine__delete(machine);
		machine = NULL;
	}

	return machine;
}
static void dsos__purge(struct dsos *dsos)
{
	struct dso *pos, *n;

	down_write(&dsos->lock);

	list_for_each_entry_safe(pos, n, &dsos->head, node) {
		RB_CLEAR_NODE(&pos->rb_node);
		pos->root = NULL;
		list_del_init(&pos->node);
		dso__put(pos);
	}

	up_write(&dsos->lock);
}
static void dsos__exit(struct dsos *dsos)
{
	dsos__purge(dsos);
	exit_rwsem(&dsos->lock);
}
void machine__delete_threads(struct machine *machine)
{
	struct rb_node *nd;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_write(&threads->lock);
		nd = rb_first_cached(&threads->entries);
		while (nd) {
			struct thread *t = rb_entry(nd, struct thread, rb_node);

			nd = rb_next(nd);
			__machine__remove_thread(machine, t, false);
		}
		up_write(&threads->lock);
	}
}
void machine__exit(struct machine *machine)
{
	int i;

	if (machine == NULL)
		return;

	machine__destroy_kernel_maps(machine);
	map_groups__exit(&machine->kmaps);
	dsos__exit(&machine->dsos);
	machine__exit_vdso(machine);
	zfree(&machine->root_dir);
	zfree(&machine->mmap_name);
	zfree(&machine->current_tid);

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];
		struct thread *thread, *n;
		/*
		 * Forget about the dead, at this point whatever threads were
		 * left in the dead lists better have a reference count taken
		 * by who is using them, and then, when they drop those references
		 * and it finally hits zero, thread__put() will check and see that
		 * it's not in the dead threads list and will not try to remove it
		 * from there, just calling thread__delete() straight away.
		 */
		list_for_each_entry_safe(thread, n, &threads->dead, node)
			list_del_init(&thread->node);

		exit_rwsem(&threads->lock);
	}
}

void machine__delete(struct machine *machine)
{
	machine__exit(machine);
	free(machine);
}
void machines__init(struct machines *machines)
{
	machine__init(&machines->host, "", HOST_KERNEL_ID);
	machines->guests = RB_ROOT_CACHED;
}

void machines__exit(struct machines *machines)
{
	machine__exit(&machines->host);
	/* XXX exit guest */
}
struct machine *machines__add(struct machines *machines, pid_t pid,
			      const char *root_dir)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *pos, *machine = malloc(sizeof(*machine));
	bool leftmost = true;

	if (machine == NULL)
		return NULL;

	if (machine__init(machine, root_dir, pid) != 0) {
		free(machine);
		return NULL;
	}

	while (*p != NULL) {
		parent = *p;
		pos = rb_entry(parent, struct machine, rb_node);
		if (pid < pos->pid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&machine->rb_node, parent, p);
	rb_insert_color_cached(&machine->rb_node, &machines->guests, leftmost);

	return machine;
}
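
/*
 * Guests are kept in an rb-tree ordered by pid, so machines__find() below
 * can do a plain binary-search descent instead of a linear scan.
 */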
void machines__set_comm_exec(struct machines *machines, bool comm_exec)
{
	struct rb_node *nd;

	machines->host.comm_exec = comm_exec;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		machine->comm_exec = comm_exec;
	}
}
struct machine *machines__find(struct machines *machines, pid_t pid)
{
	struct rb_node **p = &machines->guests.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct machine *machine;
	struct machine *default_machine = NULL;

	if (pid == HOST_KERNEL_ID)
		return &machines->host;

	while (*p != NULL) {
		parent = *p;
		machine = rb_entry(parent, struct machine, rb_node);
		if (pid < machine->pid)
			p = &(*p)->rb_left;
		else if (pid > machine->pid)
			p = &(*p)->rb_right;
		else
			return machine;
		if (!machine->pid)
			default_machine = machine;
	}

	return default_machine;
}
struct machine *machines__findnew(struct machines *machines, pid_t pid)
{
	char path[PATH_MAX];
	const char *root_dir = "";
	struct machine *machine = machines__find(machines, pid);

	if (machine && (machine->pid == pid))
		goto out;

	if ((pid != HOST_KERNEL_ID) &&
	    (pid != DEFAULT_GUEST_KERNEL_ID) &&
	    (symbol_conf.guestmount)) {
		sprintf(path, "%s/%d", symbol_conf.guestmount, pid);
		if (access(path, R_OK)) {
			static struct strlist *seen;

			if (!seen)
				seen = strlist__new(NULL, NULL);

			if (!strlist__has_entry(seen, path)) {
				pr_err("Can't access file %s\n", path);
				strlist__add(seen, path);
			}
			machine = NULL;
			goto out;
		}
		root_dir = path;
	}

	machine = machines__add(machines, pid, root_dir);
out:
	return machine;
}
void machines__process_guests(struct machines *machines,
			      machine__process_t process, void *data)
{
	struct rb_node *nd;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		process(pos, data);
	}
}
void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size)
{
	struct rb_node *node;
	struct machine *machine;

	machines->host.id_hdr_size = id_hdr_size;

	for (node = rb_first_cached(&machines->guests); node;
	     node = rb_next(node)) {
		machine = rb_entry(node, struct machine, rb_node);
		machine->id_hdr_size = id_hdr_size;
	}
}
static void machine__update_thread_pid(struct machine *machine,
				       struct thread *th, pid_t pid)
{
	struct thread *leader;

	if (pid == th->pid_ || pid == -1 || th->pid_ != -1)
		return;

	th->pid_ = pid;

	if (th->pid_ == th->tid)
		return;

	leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
	if (!leader)
		goto out_err;

	if (!leader->mg)
		leader->mg = map_groups__new(machine);

	if (!leader->mg)
		goto out_err;

	if (th->mg == leader->mg)
		goto out_put;

	if (th->mg) {
		/*
		 * Maps are created from MMAP events which provide the pid and
		 * tid. Consequently there never should be any maps on a thread
		 * with an unknown pid. Just print an error if there are.
		 */
		if (!map_groups__empty(th->mg))
			pr_err("Discarding thread maps for %d:%d\n",
			       th->pid_, th->tid);
		map_groups__put(th->mg);
	}

	th->mg = map_groups__get(leader->mg);
out_put:
	thread__put(leader);
	return;
out_err:
	pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
	goto out_put;
}
/*
 * Front-end cache - TID lookups come in blocks,
 * so most of the time we don't have to look up
 * the full rbtree:
 */
static struct thread*
__threads__get_last_match(struct threads *threads, struct machine *machine,
			  int pid, int tid)
{
	struct thread *th;

	th = threads->last_match;
	if (th != NULL) {
		if (th->tid == tid) {
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		threads->last_match = NULL;
	}

	return NULL;
}
static struct thread*
threads__get_last_match(struct threads *threads, struct machine *machine,
			int pid, int tid)
{
	struct thread *th = NULL;

	if (perf_singlethreaded)
		th = __threads__get_last_match(threads, machine, pid, tid);

	return th;
}
static void
__threads__set_last_match(struct threads *threads, struct thread *th)
{
	threads->last_match = th;
}

static void
threads__set_last_match(struct threads *threads, struct thread *th)
{
	if (perf_singlethreaded)
		__threads__set_last_match(threads, th);
}
/*
 * Caller must eventually drop thread->refcnt returned with a successful
 * lookup/new thread inserted.
 */
static struct thread *____machine__findnew_thread(struct machine *machine,
						  struct threads *threads,
						  pid_t pid, pid_t tid,
						  bool create)
{
	struct rb_node **p = &threads->entries.rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct thread *th;
	bool leftmost = true;

	th = threads__get_last_match(threads, machine, pid, tid);
	if (th)
		return th;

	while (*p != NULL) {
		parent = *p;
		th = rb_entry(parent, struct thread, rb_node);

		if (th->tid == tid) {
			threads__set_last_match(threads, th);
			machine__update_thread_pid(machine, th, pid);
			return thread__get(th);
		}

		if (tid < th->tid)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	if (!create)
		return NULL;

	th = thread__new(pid, tid);
	if (th != NULL) {
		rb_link_node(&th->rb_node, parent, p);
		rb_insert_color_cached(&th->rb_node, &threads->entries, leftmost);

		/*
		 * We have to initialize map_groups separately
		 * after rb tree is updated.
		 *
		 * The reason is that we call machine__findnew_thread
		 * within thread__init_map_groups to find the thread
		 * leader and that would screw up the rb tree.
		 */
		if (thread__init_map_groups(th, machine)) {
			rb_erase_cached(&th->rb_node, &threads->entries);
			RB_CLEAR_NODE(&th->rb_node);
			thread__put(th);
			return NULL;
		}
		/*
		 * It is now in the rbtree, get a ref
		 */
		thread__get(th);
		threads__set_last_match(threads, th);
	}

	return th;
}
struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
{
	return ____machine__findnew_thread(machine, machine__threads(machine, tid), pid, tid, true);
}
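
/*
 * Note: the double-underscore variant above assumes the relevant
 * threads->lock is already held by the caller; machine__findnew_thread()
 * below is the locked wrapper that most call sites should use.
 */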
struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
				       pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_write(&threads->lock);
	th = __machine__findnew_thread(machine, pid, tid);
	up_write(&threads->lock);
	return th;
}
struct thread *machine__find_thread(struct machine *machine, pid_t pid,
				    pid_t tid)
{
	struct threads *threads = machine__threads(machine, tid);
	struct thread *th;

	down_read(&threads->lock);
	th = ____machine__findnew_thread(machine, threads, pid, tid, false);
	up_read(&threads->lock);
	return th;
}
struct comm *machine__thread_exec_comm(struct machine *machine,
				       struct thread *thread)
{
	if (machine->comm_exec)
		return thread__exec_comm(thread);

	return thread__comm(thread);
}
int machine__process_comm_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->comm.pid,
							event->comm.tid);
	bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
	int err = 0;

	if (exec)
		machine->comm_exec = true;

	if (dump_trace)
		perf_event__fprintf_comm(event, stdout);

	if (thread == NULL ||
	    __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}
int machine__process_namespaces_event(struct machine *machine __maybe_unused,
				      union perf_event *event,
				      struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__findnew_thread(machine,
							event->namespaces.pid,
							event->namespaces.tid);
	int err = 0;

	WARN_ONCE(event->namespaces.nr_namespaces > NR_NAMESPACES,
		  "\nWARNING: kernel seems to support more namespaces than perf"
		  " tool.\nTry updating the perf tool..\n\n");

	WARN_ONCE(event->namespaces.nr_namespaces < NR_NAMESPACES,
		  "\nWARNING: perf tool seems to support more namespaces than"
		  " the kernel.\nTry updating the kernel..\n\n");

	if (dump_trace)
		perf_event__fprintf_namespaces(event, stdout);

	if (thread == NULL ||
	    thread__set_namespaces(thread, sample->time, &event->namespaces)) {
		dump_printf("problem processing PERF_RECORD_NAMESPACES, skipping event.\n");
		err = -1;
	}

	thread__put(thread);

	return err;
}
int machine__process_lost_event(struct machine *machine __maybe_unused,
				union perf_event *event, struct perf_sample *sample __maybe_unused)
{
	dump_printf(": id:%" PRIu64 ": lost:%" PRIu64 "\n",
		    event->lost.id, event->lost.lost);
	return 0;
}
int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
					union perf_event *event, struct perf_sample *sample)
{
	dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
		    sample->id, event->lost_samples.lost);
	return 0;
}
static struct dso *machine__findnew_module_dso(struct machine *machine,
					       struct kmod_path *m,
					       const char *filename)
{
	struct dso *dso;

	down_write(&machine->dsos.lock);

	dso = __dsos__find(&machine->dsos, m->name, true);
	if (!dso) {
		dso = __dsos__addnew(&machine->dsos, m->name);
		if (dso == NULL)
			goto out_unlock;

		dso__set_module_info(dso, m, machine);
		dso__set_long_name(dso, strdup(filename), true);
	}

	dso__get(dso);
out_unlock:
	up_write(&machine->dsos.lock);
	return dso;
}
int machine__process_aux_event(struct machine *machine __maybe_unused,
			       union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_aux(event, stdout);
	return 0;
}

int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
					union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_itrace_start(event, stdout);
	return 0;
}

int machine__process_switch_event(struct machine *machine __maybe_unused,
				  union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_switch(event, stdout);
	return 0;
}
static int machine__process_ksymbol_register(struct machine *machine,
					     union perf_event *event,
					     struct perf_sample *sample __maybe_unused)
{
	struct symbol *sym;
	struct map *map;

	map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
	if (!map) {
		map = dso__new_map(event->ksymbol_event.name);
		if (!map)
			return -ENOMEM;

		map->start = event->ksymbol_event.addr;
		map->end = map->start + event->ksymbol_event.len;
		map_groups__insert(&machine->kmaps, map);
	}

	sym = symbol__new(map->map_ip(map, map->start),
			  event->ksymbol_event.len,
			  0, 0, event->ksymbol_event.name);
	if (!sym)
		return -ENOMEM;
	dso__insert_symbol(map->dso, sym);
	return 0;
}
static int machine__process_ksymbol_unregister(struct machine *machine,
					       union perf_event *event,
					       struct perf_sample *sample __maybe_unused)
{
	struct map *map;

	map = map_groups__find(&machine->kmaps, event->ksymbol_event.addr);
	if (map)
		map_groups__remove(&machine->kmaps, map);

	return 0;
}
int machine__process_ksymbol(struct machine *machine __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample)
{
	if (dump_trace)
		perf_event__fprintf_ksymbol(event, stdout);

	if (event->ksymbol_event.flags & PERF_RECORD_KSYMBOL_FLAGS_UNREGISTER)
		return machine__process_ksymbol_unregister(machine, event,
							   sample);
	return machine__process_ksymbol_register(machine, event, sample);
}
static void dso__adjust_kmod_long_name(struct dso *dso, const char *filename)
{
	const char *dup_filename;

	if (!filename || !dso || !dso->long_name)
		return;
	if (dso->long_name[0] != '[')
		return;
	if (!strchr(filename, '/'))
		return;

	dup_filename = strdup(filename);
	if (!dup_filename)
		return;

	dso__set_long_name(dso, dup_filename, true);
}
struct map *machine__findnew_module_map(struct machine *machine, u64 start,
					const char *filename)
{
	struct map *map = NULL;
	struct dso *dso = NULL;
	struct kmod_path m;

	if (kmod_path__parse_name(&m, filename))
		return NULL;

	map = map_groups__find_by_name(&machine->kmaps, m.name);
	if (map) {
		/*
		 * If the map's dso is an offline module, give dso__load()
		 * a chance to find the file path of that module by fixing
		 * long_name.
		 */
		dso__adjust_kmod_long_name(map->dso, filename);
		goto out;
	}

	dso = machine__findnew_module_dso(machine, &m, filename);
	if (dso == NULL)
		goto out;

	map = map__new2(start, dso);
	if (map == NULL)
		goto out;

	map_groups__insert(&machine->kmaps, map);

	/* Put the map here because map_groups__insert already got it */
	map__put(map);
out:
	/* put the dso here, corresponding to machine__findnew_module_dso */
	dso__put(dso);
	zfree(&m.name);
	return map;
}
size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
{
	struct rb_node *nd;
	size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += __dsos__fprintf(&pos->dsos.head, fp);
	}

	return ret;
}
size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
				     bool (skip)(struct dso *dso, int parm), int parm)
{
	return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
}
size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
				      bool (skip)(struct dso *dso, int parm), int parm)
{
	struct rb_node *nd;
	size_t ret = machine__fprintf_dsos_buildid(&machines->host, fp, skip, parm);

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *pos = rb_entry(nd, struct machine, rb_node);

		ret += machine__fprintf_dsos_buildid(pos, fp, skip, parm);
	}

	return ret;
}
size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp)
{
	int i;
	size_t printed = 0;
	struct dso *kdso = machine__kernel_map(machine)->dso;

	if (kdso->has_build_id) {
		char filename[PATH_MAX];

		if (dso__build_id_filename(kdso, filename, sizeof(filename),
					   false))
			printed += fprintf(fp, "[0] %s\n", filename);
	}
	for (i = 0; i < vmlinux_path__nr_entries; ++i)
		printed += fprintf(fp, "[%d] %s\n",
				   i + kdso->has_build_id, vmlinux_path[i]);

	return printed;
}
size_t machine__fprintf(struct machine *machine, FILE *fp)
{
	struct rb_node *nd;
	size_t ret;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		struct threads *threads = &machine->threads[i];

		down_read(&threads->lock);

		ret = fprintf(fp, "Threads: %u\n", threads->nr);

		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			struct thread *pos = rb_entry(nd, struct thread, rb_node);

			ret += thread__fprintf(pos, fp);
		}

		up_read(&threads->lock);
	}

	return ret;
}
static struct dso *machine__get_kernel(struct machine *machine)
{
	const char *vmlinux_name = machine->mmap_name;
	struct dso *kernel;

	if (machine__is_host(machine)) {
		if (symbol_conf.vmlinux_name)
			vmlinux_name = symbol_conf.vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[kernel]", DSO_TYPE_KERNEL);
	} else {
		if (symbol_conf.default_guest_vmlinux_name)
			vmlinux_name = symbol_conf.default_guest_vmlinux_name;

		kernel = machine__findnew_kernel(machine, vmlinux_name,
						 "[guest.kernel]",
						 DSO_TYPE_GUEST_KERNEL);
	}

	if (kernel != NULL && (!kernel->has_build_id))
		dso__read_running_kernel_build_id(kernel, machine);

	return kernel;
}
struct process_args {
	u64 start;
};
void machine__get_kallsyms_filename(struct machine *machine, char *buf,
				    size_t bufsz)
{
	if (machine__is_default_guest(machine))
		scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
	else
		scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
}
const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
/* Figure out the start address of kernel map from /proc/kallsyms.
 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
 * symbol_name if it's not that important.
 */
static int machine__get_running_kernel_start(struct machine *machine,
					     const char **symbol_name,
					     u64 *start, u64 *end)
{
	char filename[PATH_MAX];
	int i, err = -1;
	const char *name;
	u64 addr = 0;

	machine__get_kallsyms_filename(machine, filename, PATH_MAX);

	if (symbol__restricted_filename(filename, "/proc/kallsyms"))
		return 0;

	for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
		err = kallsyms__get_function_start(filename, name, &addr);
		if (!err)
			break;
	}

	if (err)
		return -1;

	if (symbol_name)
		*symbol_name = name;

	*start = addr;

	err = kallsyms__get_function_start(filename, "_etext", &addr);
	if (!err)
		*end = addr;

	return 0;
}
int machine__create_extra_kernel_map(struct machine *machine,
				     struct dso *kernel,
				     struct extra_kernel_map *xm)
{
	struct kmap *kmap;
	struct map *map;

	map = map__new2(xm->start, kernel);
	if (!map)
		return -1;

	map->end   = xm->end;
	map->pgoff = xm->pgoff;

	kmap = map__kmap(map);

	kmap->kmaps = &machine->kmaps;
	strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);

	map_groups__insert(&machine->kmaps, map);

	pr_debug2("Added extra kernel map %s %" PRIx64 "-%" PRIx64 "\n",
		  kmap->name, map->start, map->end);

	map__put(map);

	return 0;
}
static u64 find_entry_trampoline(struct dso *dso)
{
	/* Duplicates are removed so lookup all aliases */
	const char *syms[] = {
		"_entry_trampoline",
		"__entry_trampoline_start",
		"entry_SYSCALL_64_trampoline",
	};
	struct symbol *sym = dso__first_symbol(dso);
	unsigned int i;

	for (; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding != STB_GLOBAL)
			continue;
		for (i = 0; i < ARRAY_SIZE(syms); i++) {
			if (!strcmp(sym->name, syms[i]))
				return sym->start;
		}
	}

	return 0;
}
/*
 * These values can be used for kernels that do not have symbols for the entry
 * trampolines in kallsyms.
 */
#define X86_64_CPU_ENTRY_AREA_PER_CPU	0xfffffe0000000000ULL
#define X86_64_CPU_ENTRY_AREA_SIZE	0x2c000
#define X86_64_ENTRY_TRAMPOLINE		0x6000
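
/*
 * Worked example (from the constants above): the trampoline of CPU 0 is
 * mapped at 0xfffffe0000000000 + 0x6000 = 0xfffffe0000006000, CPU 1 at
 * 0xfffffe0000006000 + 0x2c000 = 0xfffffe0000032000, and so on, one
 * X86_64_CPU_ENTRY_AREA_SIZE step per CPU.
 */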
/* Map x86_64 PTI entry trampolines */
int machine__map_x86_64_entry_trampolines(struct machine *machine,
					  struct dso *kernel)
{
	struct map_groups *kmaps = &machine->kmaps;
	struct maps *maps = &kmaps->maps;
	int nr_cpus_avail, cpu;
	bool found = false;
	struct map *map;
	u64 pgoff;

	/*
	 * In the vmlinux case, pgoff is a virtual address which must now be
	 * mapped to a vmlinux offset.
	 */
	for (map = maps__first(maps); map; map = map__next(map)) {
		struct kmap *kmap = __map__kmap(map);
		struct map *dest_map;

		if (!kmap || !is_entry_trampoline(kmap->name))
			continue;

		dest_map = map_groups__find(kmaps, map->pgoff);
		if (dest_map != map)
			map->pgoff = dest_map->map_ip(dest_map, map->pgoff);
		found = true;
	}
	if (found || machine->trampolines_mapped)
		return 0;

	pgoff = find_entry_trampoline(kernel);
	if (!pgoff)
		return 0;

	nr_cpus_avail = machine__nr_cpus_avail(machine);

	/* Add a 1 page map for each CPU's entry trampoline */
	for (cpu = 0; cpu < nr_cpus_avail; cpu++) {
		u64 va = X86_64_CPU_ENTRY_AREA_PER_CPU +
			 cpu * X86_64_CPU_ENTRY_AREA_SIZE +
			 X86_64_ENTRY_TRAMPOLINE;
		struct extra_kernel_map xm = {
			.start = va,
			.end   = va + page_size,
			.pgoff = pgoff,
		};

		strlcpy(xm.name, ENTRY_TRAMPOLINE_NAME, KMAP_NAME_LEN);

		if (machine__create_extra_kernel_map(machine, kernel, &xm) < 0)
			return -1;
	}

	machine->trampolines_mapped = nr_cpus_avail;

	return 0;
}
int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unused,
					     struct dso *kernel __maybe_unused)
{
	return 0;
}
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
	struct kmap *kmap;
	struct map *map;

	/* If the kernel map is being renewed, destroy the previous one first */
	machine__destroy_kernel_maps(machine);

	machine->vmlinux_map = map__new2(0, kernel);
	if (machine->vmlinux_map == NULL)
		return -1;

	machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
	map = machine__kernel_map(machine);
	kmap = map__kmap(map);
	if (!kmap)
		return -1;

	kmap->kmaps = &machine->kmaps;
	map_groups__insert(&machine->kmaps, map);

	return 0;
}
void machine__destroy_kernel_maps(struct machine *machine)
{
	struct kmap *kmap;
	struct map *map = machine__kernel_map(machine);

	if (map == NULL)
		return;

	kmap = map__kmap(map);
	map_groups__remove(&machine->kmaps, map);
	if (kmap && kmap->ref_reloc_sym) {
		zfree((char **)&kmap->ref_reloc_sym->name);
		zfree(&kmap->ref_reloc_sym);
	}

	map__zput(machine->vmlinux_map);
}
int machines__create_guest_kernel_maps(struct machines *machines)
{
	int ret = 0;
	struct dirent **namelist = NULL;
	int i, items = 0;
	char path[PATH_MAX];
	pid_t pid;
	char *endp;

	if (symbol_conf.default_guest_vmlinux_name ||
	    symbol_conf.default_guest_modules ||
	    symbol_conf.default_guest_kallsyms) {
		machines__create_kernel_maps(machines, DEFAULT_GUEST_KERNEL_ID);
	}

	if (symbol_conf.guestmount) {
		items = scandir(symbol_conf.guestmount, &namelist, NULL, NULL);
		if (items <= 0)
			return -ENOENT;
		for (i = 0; i < items; i++) {
			if (!isdigit(namelist[i]->d_name[0])) {
				/* Filter out . and .. */
				continue;
			}
			pid = (pid_t)strtol(namelist[i]->d_name, &endp, 10);
			if ((*endp != '\0') ||
			    (endp == namelist[i]->d_name) ||
			    (errno == ERANGE)) {
				pr_debug("invalid directory (%s). Skipping.\n",
					 namelist[i]->d_name);
				continue;
			}
			sprintf(path, "%s/%s/proc/kallsyms",
				symbol_conf.guestmount,
				namelist[i]->d_name);
			ret = access(path, R_OK);
			if (ret) {
				pr_debug("Can't access file %s\n", path);
				goto failure;
			}
			machines__create_kernel_maps(machines, pid);
		}
failure:
		free(namelist);
	}

	return ret;
}
void machines__destroy_kernel_maps(struct machines *machines)
{
	struct rb_node *next = rb_first_cached(&machines->guests);

	machine__destroy_kernel_maps(&machines->host);

	while (next) {
		struct machine *pos = rb_entry(next, struct machine, rb_node);

		next = rb_next(&pos->rb_node);
		rb_erase_cached(&pos->rb_node, &machines->guests);
		machine__delete(pos);
	}
}
int machines__create_kernel_maps(struct machines *machines, pid_t pid)
{
	struct machine *machine = machines__findnew(machines, pid);

	if (machine == NULL)
		return -1;

	return machine__create_kernel_maps(machine);
}
int machine__load_kallsyms(struct machine *machine, const char *filename)
{
	struct map *map = machine__kernel_map(machine);
	int ret = __dso__load_kallsyms(map->dso, filename, map, true);

	if (ret > 0) {
		dso__set_loaded(map->dso);
		/*
		 * Since /proc/kallsyms will have multiple sessions for the
		 * kernel, with modules between them, fixup the end of all
		 * sections.
		 */
		map_groups__fixup_end(&machine->kmaps);
	}

	return ret;
}
int machine__load_vmlinux_path(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int ret = dso__load_vmlinux_path(map->dso, map);

	if (ret > 0)
		dso__set_loaded(map->dso);

	return ret;
}
static char *get_kernel_version(const char *root_dir)
{
	char version[PATH_MAX];
	FILE *file;
	char *name, *tmp;
	const char *prefix = "Linux version ";

	sprintf(version, "%s/proc/version", root_dir);
	file = fopen(version, "r");
	if (!file)
		return NULL;

	tmp = fgets(version, sizeof(version), file);
	fclose(file);
	if (!tmp)
		return NULL;

	name = strstr(version, prefix);
	if (!name)
		return NULL;
	name += strlen(prefix);
	tmp = strchr(name, ' ');
	if (tmp)
		*tmp = '\0';

	return strdup(name);
}
static bool is_kmod_dso(struct dso *dso)
{
	return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
	       dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE;
}
static int map_groups__set_module_path(struct map_groups *mg, const char *path,
				       struct kmod_path *m)
{
	char *long_name;
	struct map *map = map_groups__find_by_name(mg, m->name);

	if (map == NULL)
		return 0;

	long_name = strdup(path);
	if (long_name == NULL)
		return -ENOMEM;

	dso__set_long_name(map->dso, long_name, true);
	dso__kernel_module_get_build_id(map->dso, "");

	/*
	 * The full name could reveal kmod compression, so
	 * we need to update the symtab_type if needed.
	 */
	if (m->comp && is_kmod_dso(map->dso)) {
		map->dso->symtab_type++;
		map->dso->comp = m->comp;
	}

	return 0;
}
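
/*
 * Note: the symtab_type++ above relies on each DSO_BINARY_TYPE__*_KMODULE
 * enumerator being immediately followed by its *_KMODULE_COMP counterpart
 * in the enum, so incrementing the type switches to the compressed variant.
 */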
static int map_groups__set_modules_path_dir(struct map_groups *mg,
					    const char *dir_name, int depth)
{
	struct dirent *dent;
	DIR *dir = opendir(dir_name);
	int ret = 0;

	if (!dir) {
		pr_debug("%s: cannot open %s dir\n", __func__, dir_name);
		return -1;
	}

	while ((dent = readdir(dir)) != NULL) {
		char path[PATH_MAX];
		struct stat st;

		/* sshfs might return bad dent->d_type, so we have to stat */
		snprintf(path, sizeof(path), "%s/%s", dir_name, dent->d_name);
		if (stat(path, &st))
			continue;

		if (S_ISDIR(st.st_mode)) {
			if (!strcmp(dent->d_name, ".") ||
			    !strcmp(dent->d_name, ".."))
				continue;

			/* Do not follow top-level source and build symlinks */
			if (depth == 0) {
				if (!strcmp(dent->d_name, "source") ||
				    !strcmp(dent->d_name, "build"))
					continue;
			}

			ret = map_groups__set_modules_path_dir(mg, path,
							       depth + 1);
			if (ret < 0)
				goto out;
		} else {
			struct kmod_path m;

			ret = kmod_path__parse_name(&m, dent->d_name);
			if (ret)
				goto out;

			if (m.kmod)
				ret = map_groups__set_module_path(mg, path, &m);

			zfree(&m.name);

			if (ret)
				goto out;
		}
	}

out:
	closedir(dir);
	return ret;
}
static int machine__set_modules_path(struct machine *machine)
{
	char *version;
	char modules_path[PATH_MAX];

	version = get_kernel_version(machine->root_dir);
	if (!version)
		return -1;

	snprintf(modules_path, sizeof(modules_path), "%s/lib/modules/%s",
		 machine->root_dir, version);
	free(version);

	return map_groups__set_modules_path_dir(&machine->kmaps, modules_path, 0);
}
int __weak arch__fix_module_text_start(u64 *start __maybe_unused,
				       u64 *size __maybe_unused,
				       const char *name __maybe_unused)
{
	return 0;
}
static int machine__create_module(void *arg, const char *name, u64 start,
				  u64 size)
{
	struct machine *machine = arg;
	struct map *map;

	if (arch__fix_module_text_start(&start, &size, name) < 0)
		return -1;

	map = machine__findnew_module_map(machine, start, name);
	if (map == NULL)
		return -1;
	map->end = start + size;

	dso__kernel_module_get_build_id(map->dso, machine->root_dir);

	return 0;
}
static int machine__create_modules(struct machine *machine)
{
	const char *modules;
	char path[PATH_MAX];

	if (machine__is_default_guest(machine)) {
		modules = symbol_conf.default_guest_modules;
	} else {
		snprintf(path, PATH_MAX, "%s/proc/modules", machine->root_dir);
		modules = path;
	}

	if (symbol__restricted_filename(modules, "/proc/modules"))
		return -1;

	if (modules__parse(modules, machine, machine__create_module))
		return -1;

	if (!machine__set_modules_path(machine))
		return 0;

	pr_debug("Problems setting modules path maps, continuing anyway...\n");

	return 0;
}
static void machine__set_kernel_mmap(struct machine *machine,
				     u64 start, u64 end)
{
	machine->vmlinux_map->start = start;
	machine->vmlinux_map->end   = end;
	/*
	 * Be a bit paranoid here, some perf.data file came with
	 * a zero sized synthesized MMAP event for the kernel.
	 */
	if (start == 0 && end == 0)
		machine->vmlinux_map->end = ~0ULL;
}
static void machine__update_kernel_mmap(struct machine *machine,
					u64 start, u64 end)
{
	struct map *map = machine__kernel_map(machine);

	map__get(map);
	map_groups__remove(&machine->kmaps, map);

	machine__set_kernel_mmap(machine, start, end);

	map_groups__insert(&machine->kmaps, map);
	map__put(map);
}
int machine__create_kernel_maps(struct machine *machine)
{
	struct dso *kernel = machine__get_kernel(machine);
	const char *name = NULL;
	struct map *map;
	u64 start = 0, end = ~0ULL;
	int ret;

	if (kernel == NULL)
		return -1;

	ret = __machine__create_kernel_maps(machine, kernel);
	if (ret < 0)
		goto out_put;

	if (symbol_conf.use_modules && machine__create_modules(machine) < 0) {
		if (machine__is_host(machine))
			pr_debug("Problems creating module maps, "
				 "continuing anyway...\n");
		else
			pr_debug("Problems creating module maps for guest %d, "
				 "continuing anyway...\n", machine->pid);
	}

	if (!machine__get_running_kernel_start(machine, &name, &start, &end)) {
		if (name &&
		    map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map, name, start)) {
			machine__destroy_kernel_maps(machine);
			ret = -1;
			goto out_put;
		}

		/*
		 * we have a real start address now, so re-order the kmaps
		 * assume it's the last in the kmaps
		 */
		machine__update_kernel_mmap(machine, start, end);
	}

	if (machine__create_extra_kernel_maps(machine, kernel))
		pr_debug("Problems creating extra kernel maps, continuing anyway...\n");

	if (end == ~0ULL) {
		/* update end address of the kernel map using adjacent module address */
		map = map__next(machine__kernel_map(machine));
		if (map)
			machine__set_kernel_mmap(machine, start, map->start);
	}

out_put:
	dso__put(kernel);
	return ret;
}
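
/*
 * Summary of the flow above: find or create the kernel dso, install the
 * vmlinux map, optionally add module maps, then, if a running-kernel start
 * address can be read from kallsyms, relocate the kernel map to it.
 */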
static bool machine__uses_kcore(struct machine *machine)
{
	struct dso *dso;

	list_for_each_entry(dso, &machine->dsos.head, node) {
		if (dso__is_kcore(dso))
			return true;
	}

	return false;
}
static bool perf_event__is_extra_kernel_mmap(struct machine *machine,
					     union perf_event *event)
{
	return machine__is(machine, "x86_64") &&
	       is_entry_trampoline(event->mmap.filename);
}
static int machine__process_extra_kernel_map(struct machine *machine,
					     union perf_event *event)
{
	struct map *kernel_map = machine__kernel_map(machine);
	struct dso *kernel = kernel_map ? kernel_map->dso : NULL;
	struct extra_kernel_map xm = {
		.start = event->mmap.start,
		.end   = event->mmap.start + event->mmap.len,
		.pgoff = event->mmap.pgoff,
	};

	if (kernel == NULL)
		return -1;

	strlcpy(xm.name, event->mmap.filename, KMAP_NAME_LEN);

	return machine__create_extra_kernel_map(machine, kernel, &xm);
}
static int machine__process_kernel_mmap_event(struct machine *machine,
					      union perf_event *event)
{
	struct map *map;
	enum dso_kernel_type kernel_type;
	bool is_kernel_mmap;

	/* If we have maps from kcore then we do not need or want any others */
	if (machine__uses_kcore(machine))
		return 0;

	if (machine__is_host(machine))
		kernel_type = DSO_TYPE_KERNEL;
	else
		kernel_type = DSO_TYPE_GUEST_KERNEL;

	is_kernel_mmap = memcmp(event->mmap.filename,
				machine->mmap_name,
				strlen(machine->mmap_name) - 1) == 0;
	if (event->mmap.filename[0] == '/' ||
	    (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
		map = machine__findnew_module_map(machine, event->mmap.start,
						  event->mmap.filename);
		if (map == NULL)
			goto out_problem;

		map->end = map->start + event->mmap.len;
	} else if (is_kernel_mmap) {
		const char *symbol_name = (event->mmap.filename +
				strlen(machine->mmap_name));
		/*
		 * Should be there already, from the build-id table in
		 * the header.
		 */
		struct dso *kernel = NULL;
		struct dso *dso;

		down_read(&machine->dsos.lock);

		list_for_each_entry(dso, &machine->dsos.head, node) {

			/*
			 * The cpumode passed to is_kernel_module is not the
			 * cpumode of *this* event. If we insist on passing
			 * correct cpumode to is_kernel_module, we should
			 * record the cpumode when we add this dso to the
			 * linked list.
			 *
			 * However we don't really need to pass the correct
			 * cpumode. We know the correct cpumode must be kernel
			 * mode (if not, we should not link it onto the
			 * kernel_dsos list).
			 *
			 * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
			 * is_kernel_module() treats it as a kernel cpumode.
			 */
			if (!dso->kernel ||
			    is_kernel_module(dso->long_name,
					     PERF_RECORD_MISC_CPUMODE_UNKNOWN))
				continue;

			kernel = dso;
			break;
		}

		up_read(&machine->dsos.lock);

		if (kernel == NULL)
			kernel = machine__findnew_dso(machine, machine->mmap_name);
		if (kernel == NULL)
			goto out_problem;

		kernel->kernel = kernel_type;
		if (__machine__create_kernel_maps(machine, kernel) < 0) {
			dso__put(kernel);
			goto out_problem;
		}

		if (strstr(kernel->long_name, "vmlinux"))
			dso__set_short_name(kernel, "[kernel.vmlinux]", false);

		machine__update_kernel_mmap(machine, event->mmap.start,
					    event->mmap.start + event->mmap.len);

		/*
		 * Avoid using a zero address (kptr_restrict) for the ref reloc
		 * symbol. Effectively having zero here means that at record
		 * time /proc/sys/kernel/kptr_restrict was non zero.
		 */
		if (event->mmap.pgoff != 0) {
			map__set_kallsyms_ref_reloc_sym(machine->vmlinux_map,
							symbol_name,
							event->mmap.pgoff);
		}

		if (machine__is_default_guest(machine)) {
			/*
			 * preload dso of guest kernel and modules
			 */
			dso__load(kernel, machine__kernel_map(machine));
		}
	} else if (perf_event__is_extra_kernel_mmap(machine, event)) {
		return machine__process_extra_kernel_map(machine, event);
	}
	return 0;
out_problem:
	return -1;
}
int machine__process_mmap2_event(struct machine *machine,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap2(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap2.pid,
					 event->mmap2.tid);
	if (thread == NULL)
		goto out_problem;

	map = map__new(machine, event->mmap2.start,
		       event->mmap2.len, event->mmap2.pgoff,
		       event->mmap2.maj,
		       event->mmap2.min, event->mmap2.ino,
		       event->mmap2.ino_generation,
		       event->mmap2.prot,
		       event->mmap2.flags,
		       event->mmap2.filename, thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
	return 0;
}
int machine__process_mmap_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread;
	struct map *map;
	u32 prot = 0;
	int ret = 0;

	if (dump_trace)
		perf_event__fprintf_mmap(event, stdout);

	if (sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL ||
	    sample->cpumode == PERF_RECORD_MISC_KERNEL) {
		ret = machine__process_kernel_mmap_event(machine, event);
		if (ret < 0)
			goto out_problem;
		return 0;
	}

	thread = machine__findnew_thread(machine, event->mmap.pid,
					 event->mmap.tid);
	if (thread == NULL)
		goto out_problem;

	if (!(event->header.misc & PERF_RECORD_MISC_MMAP_DATA))
		prot = PROT_EXEC;

	map = map__new(machine, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       0, 0, 0, 0, prot, 0,
		       event->mmap.filename,
		       thread);

	if (map == NULL)
		goto out_problem_map;

	ret = thread__insert_map(thread, map);
	if (ret)
		goto out_problem_insert;

	thread__put(thread);
	map__put(map);
	return 0;

out_problem_insert:
	map__put(map);
out_problem_map:
	thread__put(thread);
out_problem:
	dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
	return 0;
}
static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
{
	struct threads *threads = machine__threads(machine, th->tid);

	if (threads->last_match == th)
		threads__set_last_match(threads, NULL);

	if (lock)
		down_write(&threads->lock);

	BUG_ON(refcount_read(&th->refcnt) == 0);

	rb_erase_cached(&th->rb_node, &threads->entries);
	RB_CLEAR_NODE(&th->rb_node);
	/*
	 * Move it first to the dead_threads list, then drop the reference,
	 * if this is the last reference, then the thread__delete destructor
	 * will be called and we will remove it from the dead_threads list.
	 */
	list_add_tail(&th->node, &threads->dead);

	/*
	 * We need to do the put here because if this is the last refcount,
	 * then we will be touching the threads->dead head when removing the
	 * thread.
	 */
	thread__put(th);

	if (lock)
		up_write(&threads->lock);
}
void machine__remove_thread(struct machine *machine, struct thread *th)
{
	return __machine__remove_thread(machine, th, true);
}
int machine__process_fork_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);
	struct thread *parent = machine__findnew_thread(machine,
							event->fork.ppid,
							event->fork.ptid);
	bool do_maps_clone = true;
	int err = 0;

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	/*
	 * There may be an existing thread that is not actually the parent,
	 * either because we are processing events out of order, or because the
	 * (fork) event that would have removed the thread was lost. Assume the
	 * latter case and continue on as best we can.
	 */
	if (parent->pid_ != (pid_t)event->fork.ppid) {
		dump_printf("removing erroneous parent thread %d/%d\n",
			    parent->pid_, parent->tid);
		machine__remove_thread(machine, parent);
		thread__put(parent);
		parent = machine__findnew_thread(machine, event->fork.ppid,
						 event->fork.ptid);
	}

	/* if a thread currently exists for the thread id remove it */
	if (thread != NULL) {
		machine__remove_thread(machine, thread);
		thread__put(thread);
	}

	thread = machine__findnew_thread(machine, event->fork.pid,
					 event->fork.tid);
	/*
	 * When synthesizing FORK events, we are trying to create thread
	 * objects for the already running tasks on the machine.
	 *
	 * Normally, for a kernel FORK event, we want to clone the parent's
	 * maps because that is what the kernel just did.
	 *
	 * But when synthesizing, this should not be done.  If we do, we end up
	 * with overlapping maps as we process the synthesized MMAP2 events that
	 * get delivered shortly thereafter.
	 *
	 * Use the FORK event misc flags in an internal way to signal this
	 * situation, so we can elide the map clone when appropriate.
	 */
	if (event->fork.header.misc & PERF_RECORD_MISC_FORK_EXEC)
		do_maps_clone = false;

	if (thread == NULL || parent == NULL ||
	    thread__fork(thread, parent, sample->time, do_maps_clone) < 0) {
		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
		err = -1;
	}
	thread__put(thread);
	thread__put(parent);

	return err;
}
int machine__process_exit_event(struct machine *machine, union perf_event *event,
				struct perf_sample *sample __maybe_unused)
{
	struct thread *thread = machine__find_thread(machine,
						     event->fork.pid,
						     event->fork.tid);

	if (dump_trace)
		perf_event__fprintf_task(event, stdout);

	if (thread != NULL) {
		thread__exited(thread);
		thread__put(thread);
	}

	return 0;
}
int machine__process_event(struct machine *machine, union perf_event *event,
			   struct perf_sample *sample)
{
	int ret;

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret = machine__process_comm_event(machine, event, sample); break;
	case PERF_RECORD_MMAP:
		ret = machine__process_mmap_event(machine, event, sample); break;
	case PERF_RECORD_NAMESPACES:
		ret = machine__process_namespaces_event(machine, event, sample); break;
	case PERF_RECORD_MMAP2:
		ret = machine__process_mmap2_event(machine, event, sample); break;
	case PERF_RECORD_FORK:
		ret = machine__process_fork_event(machine, event, sample); break;
	case PERF_RECORD_EXIT:
		ret = machine__process_exit_event(machine, event, sample); break;
	case PERF_RECORD_LOST:
		ret = machine__process_lost_event(machine, event, sample); break;
	case PERF_RECORD_AUX:
		ret = machine__process_aux_event(machine, event); break;
	case PERF_RECORD_ITRACE_START:
		ret = machine__process_itrace_start_event(machine, event); break;
	case PERF_RECORD_LOST_SAMPLES:
		ret = machine__process_lost_samples_event(machine, event, sample); break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret = machine__process_switch_event(machine, event); break;
	case PERF_RECORD_KSYMBOL:
		ret = machine__process_ksymbol(machine, event, sample); break;
	case PERF_RECORD_BPF_EVENT:
		ret = machine__process_bpf_event(machine, event, sample); break;
	default:
		ret = -1;
		break;
	}

	return ret;
}
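
/*
 * machine__process_event() is the per-machine dispatcher: tools that read
 * a perf.data stream typically funnel every non-sample event through it.
 */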
static bool symbol__match_regex(struct symbol *sym, regex_t *regex)
{
	if (!regexec(regex, sym->name, 0, NULL, 0))
		return 1;
	return 0;
}
static void ip__resolve_ams(struct thread *thread,
			    struct addr_map_symbol *ams,
			    u64 ip)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));
	/*
	 * We cannot use the header.misc hint to determine whether a
	 * branch stack address is user, kernel, guest, hypervisor.
	 * Branches may straddle the kernel/user/hypervisor boundaries.
	 * Thus, we have to try consecutively until we find a match
	 * or else, the symbol is unknown
	 */
	thread__find_cpumode_addr_location(thread, ip, &al);

	ams->addr = ip;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = 0;
}
static void ip__resolve_data(struct thread *thread,
			     u8 m, struct addr_map_symbol *ams,
			     u64 addr, u64 phys_addr)
{
	struct addr_location al;

	memset(&al, 0, sizeof(al));

	thread__find_symbol(thread, m, addr, &al);

	ams->addr = addr;
	ams->al_addr = al.addr;
	ams->sym = al.sym;
	ams->map = al.map;
	ams->phys_addr = phys_addr;
}
struct mem_info *sample__resolve_mem(struct perf_sample *sample,
				     struct addr_location *al)
{
	struct mem_info *mi = mem_info__new();

	if (!mi)
		return NULL;

	ip__resolve_ams(al->thread, &mi->iaddr, sample->ip);
	ip__resolve_data(al->thread, al->cpumode, &mi->daddr,
			 sample->addr, sample->phys_addr);
	mi->data_src.val = sample->data_src;

	return mi;
}
static char *callchain_srcline(struct map *map, struct symbol *sym, u64 ip)
{
	char *srcline = NULL;

	if (!map || callchain_param.key == CCKEY_FUNCTION)
		return srcline;

	srcline = srcline__tree_find(&map->dso->srclines, ip);
	if (!srcline) {
		bool show_sym = false;
		bool show_addr = callchain_param.key == CCKEY_ADDRESS;

		srcline = get_srcline(map->dso, map__rip_2objdump(map, ip),
				      sym, show_sym, show_addr, ip);
		srcline__tree_insert(&map->dso->srclines, ip, srcline);
	}

	return srcline;
}
static int add_callchain_ip(struct thread *thread,
			    struct callchain_cursor *cursor,
			    struct symbol **parent,
			    struct addr_location *root_al,
			    u8 *cpumode,
			    u64 ip,
			    bool branch,
			    struct branch_flags *flags,
			    struct iterations *iter,
			    u64 branch_from)
{
	struct addr_location al;
	int nr_loop_iter = 0;
	u64 iter_cycles = 0;
	const char *srcline = NULL;

	al.filtered = 0;
	al.sym = NULL;
	if (!cpumode) {
		thread__find_cpumode_addr_location(thread, ip, &al);
	} else {
		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				*cpumode = PERF_RECORD_MISC_HYPERVISOR;
				break;
			case PERF_CONTEXT_KERNEL:
				*cpumode = PERF_RECORD_MISC_KERNEL;
				break;
			case PERF_CONTEXT_USER:
				*cpumode = PERF_RECORD_MISC_USER;
				break;
			default:
				pr_debug("invalid callchain context: "
					 "%"PRId64"\n", (s64) ip);
				/*
				 * It seems the callchain is corrupted.
				 * Discard all.
				 */
				callchain_cursor_reset(cursor);
				return 1;
			}
			return 0;
		}
		thread__find_symbol(thread, *cpumode, ip, &al);
	}

	if (al.sym != NULL) {
		if (perf_hpp_list.parent && !*parent &&
		    symbol__match_regex(al.sym, &parent_regex))
			*parent = al.sym;
		else if (have_ignore_callees && root_al &&
			 symbol__match_regex(al.sym, &ignore_callees_regex)) {
			/* Treat this symbol as the root,
			   forgetting its callees. */
			*root_al = al;
			callchain_cursor_reset(cursor);
		}
	}

	if (symbol_conf.hide_unresolved && al.sym == NULL)
		return 0;

	if (iter) {
		nr_loop_iter = iter->nr_loop_iter;
		iter_cycles = iter->cycles;
	}

	srcline = callchain_srcline(al.map, al.sym, al.addr);
	return callchain_cursor_append(cursor, ip, al.map, al.sym,
				       branch, flags, nr_loop_iter,
				       iter_cycles, branch_from, srcline);
}
struct branch_info *sample__resolve_bstack(struct perf_sample *sample,
					   struct addr_location *al)
{
	unsigned int i;
	const struct branch_stack *bs = sample->branch_stack;
	struct branch_info *bi = calloc(bs->nr, sizeof(struct branch_info));

	if (!bi)
		return NULL;

	for (i = 0; i < bs->nr; i++) {
		ip__resolve_ams(al->thread, &bi[i].to, bs->entries[i].to);
		ip__resolve_ams(al->thread, &bi[i].from, bs->entries[i].from);
		bi[i].flags = bs->entries[i].flags;
	}

	return bi;
}
static void save_iterations(struct iterations *iter,
			    struct branch_entry *be, int nr)
{
	int i;

	iter->nr_loop_iter++;
	iter->cycles = 0;

	for (i = 0; i < nr; i++)
		iter->cycles += be[i].flags.cycles;
}
#define CHASHSZ 127
#define CHASHBITS 7
#define NO_ENTRY 0xff

#define PERF_MAX_BRANCH_DEPTH 127
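
/*
 * Illustrative example of what remove_loops() below does: a branch-entry
 * sequence whose "from" addresses read A B C A B C D is collapsed to
 * A B C D, with the elided repetition accounted for in iter[] (loop
 * iteration count and accumulated cycles) rather than kept as duplicate
 * entries.
 */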
static int remove_loops(struct branch_entry *l, int nr,
			struct iterations *iter)
{
	int i, j, off;
	unsigned char chash[CHASHSZ];

	memset(chash, NO_ENTRY, sizeof(chash));

	BUG_ON(PERF_MAX_BRANCH_DEPTH > 255);

	for (i = 0; i < nr; i++) {
		int h = hash_64(l[i].from, CHASHBITS) % CHASHSZ;

		/* no collision handling for now */
		if (chash[h] == NO_ENTRY) {
			chash[h] = i;
		} else if (l[chash[h]].from == l[i].from) {
			bool is_loop = true;
			/* check if it is a real loop */
			off = 0;
			for (j = chash[h]; j < i && i + off < nr; j++, off++)
				if (l[j].from != l[i + off].from) {
					is_loop = false;
					break;
				}
			if (is_loop) {
				j = nr - (i + off);
				if (j > 0) {
					save_iterations(iter + i + off,
							l + i, off);

					memmove(iter + i, iter + i + off,
						j * sizeof(*iter));

					memmove(l + i, l + i + off,
						j * sizeof(*l));
				}

				nr -= off;
			}
		}
	}
	return nr;
}
/*
 * Resolve LBR callstack chain sample
 * Return:
 * 1 on success (got LBR callchain information)
 * 0 no available LBR callchain information, should try fp
 * negative error code on other errors.
 */
static int resolve_lbr_callchain_sample(struct thread *thread,
					struct callchain_cursor *cursor,
					struct perf_sample *sample,
					struct symbol **parent,
					struct addr_location *root_al,
					int max_stack)
{
	struct ip_callchain *chain = sample->callchain;
	int chain_nr = min(max_stack, (int)chain->nr), i;
	u8 cpumode = PERF_RECORD_MISC_USER;
	u64 ip, branch_from = 0;

	for (i = 0; i < chain_nr; i++) {
		if (chain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	/* LBR only affects the user callchain */
	if (i != chain_nr) {
		struct branch_stack *lbr_stack = sample->branch_stack;
		int lbr_nr = lbr_stack->nr, j, k;
		bool branch;
		struct branch_flags *flags;
		/*
		 * LBR callstack can only get user call chain.
		 * The mix_chain_nr is kernel call chain
		 * number plus LBR user call chain number.
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER,
		 * lbr_nr + 1 is the user call chain number.
		 * For details, please refer to the comments
		 * in callchain__printf
		 */
		int mix_chain_nr = i + 1 + lbr_nr + 1;

		for (j = 0; j < mix_chain_nr; j++) {
			int err;

			branch = false;
			flags = NULL;

			if (callchain_param.order == ORDER_CALLEE) {
				if (j < i + 1)
					ip = chain->ips[j];
				else if (j > i + 1) {
					k = j - i - 2;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				} else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			} else {
				if (j < lbr_nr) {
					k = lbr_nr - j - 1;
					ip = lbr_stack->entries[k].from;
					branch = true;
					flags = &lbr_stack->entries[k].flags;
				}
				else if (j > lbr_nr)
					ip = chain->ips[i + 1 - (j - lbr_nr)];
				else {
					ip = lbr_stack->entries[0].to;
					branch = true;
					flags = &lbr_stack->entries[0].flags;
					branch_from =
						lbr_stack->entries[0].from;
				}
			}

			err = add_callchain_ip(thread, cursor, parent,
					       root_al, &cpumode, ip,
					       branch, flags, NULL,
					       branch_from);
			if (err)
				return (err < 0) ? err : 0;
		}
		return 1;
	}

	return 0;
}
static int find_prev_cpumode(struct ip_callchain *chain, struct thread *thread,
			     struct callchain_cursor *cursor,
			     struct symbol **parent,
			     struct addr_location *root_al,
			     u8 *cpumode, int ent)
{
	int err = 0;

	while (--ent >= 0) {
		u64 ip = chain->ips[ent];

		if (ip >= PERF_CONTEXT_MAX) {
			err = add_callchain_ip(thread, cursor, parent,
					       root_al, cpumode, ip,
					       false, NULL, NULL, 0);
			break;
		}
	}
	return err;
}
, 0);
2289 static int thread__resolve_callchain_sample(struct thread
*thread
,
2290 struct callchain_cursor
*cursor
,
2291 struct perf_evsel
*evsel
,
2292 struct perf_sample
*sample
,
2293 struct symbol
**parent
,
2294 struct addr_location
*root_al
,
2297 struct branch_stack
*branch
= sample
->branch_stack
;
2298 struct ip_callchain
*chain
= sample
->callchain
;
2300 u8 cpumode
= PERF_RECORD_MISC_USER
;
2301 int i
, j
, err
, nr_entries
;
2306 chain_nr
= chain
->nr
;
2308 if (perf_evsel__has_branch_callstack(evsel
)) {
2309 err
= resolve_lbr_callchain_sample(thread
, cursor
, sample
, parent
,
2310 root_al
, max_stack
);
2312 return (err
< 0) ? err
: 0;
2316 * Based on DWARF debug information, some architectures skip
2317 * a callchain entry saved by the kernel.
2319 skip_idx
= arch_skip_callchain_idx(thread
, chain
);
2322 * Add branches to call stack for easier browsing. This gives
2323 * more context for a sample than just the callers.
2325 * This uses individual histograms of paths compared to the
2326 * aggregated histograms the normal LBR mode uses.
2328 * Limitations for now:
2329 * - No extra filters
2330 * - No annotations (should annotate somehow)
2333 if (branch
&& callchain_param
.branch_callstack
) {
2334 int nr
= min(max_stack
, (int)branch
->nr
);
2335 struct branch_entry be
[nr
];
2336 struct iterations iter
[nr
];
2338 if (branch
->nr
> PERF_MAX_BRANCH_DEPTH
) {
2339 pr_warning("corrupted branch chain. skipping...\n");
2343 for (i
= 0; i
< nr
; i
++) {
2344 if (callchain_param
.order
== ORDER_CALLEE
) {
2345 be
[i
] = branch
->entries
[i
];
2351 * Check for overlap into the callchain.
2352 * The return address is one off compared to
2353 * the branch entry. To adjust for this
2354 * assume the calling instruction is not longer
2357 if (i
== skip_idx
||
2358 chain
->ips
[first_call
] >= PERF_CONTEXT_MAX
)
2360 else if (be
[i
].from
< chain
->ips
[first_call
] &&
2361 be
[i
].from
>= chain
->ips
[first_call
] - 8)
2364 be
[i
] = branch
->entries
[branch
->nr
- i
- 1];
2367 memset(iter
, 0, sizeof(struct iterations
) * nr
);
2368 nr
= remove_loops(be
, nr
, iter
);
2370 for (i
= 0; i
< nr
; i
++) {
2371 err
= add_callchain_ip(thread
, cursor
, parent
,
2378 err
= add_callchain_ip(thread
, cursor
, parent
, root_al
,
2395 if (callchain_param
.order
!= ORDER_CALLEE
) {
2396 err
= find_prev_cpumode(chain
, thread
, cursor
, parent
, root_al
,
2397 &cpumode
, chain
->nr
- first_call
);
2399 return (err
< 0) ? err
: 0;
2401 for (i
= first_call
, nr_entries
= 0;
2402 i
< chain_nr
&& nr_entries
< max_stack
; i
++) {
2405 if (callchain_param
.order
== ORDER_CALLEE
)
2408 j
= chain
->nr
- i
- 1;
2410 #ifdef HAVE_SKIP_CALLCHAIN_IDX
2415 if (ip
< PERF_CONTEXT_MAX
)
2417 else if (callchain_param
.order
!= ORDER_CALLEE
) {
2418 err
= find_prev_cpumode(chain
, thread
, cursor
, parent
,
2419 root_al
, &cpumode
, j
);
2421 return (err
< 0) ? err
: 0;
2425 err
= add_callchain_ip(thread
, cursor
, parent
,
2426 root_al
, &cpumode
, ip
,
2427 false, NULL
, NULL
, 0);
2430 return (err
< 0) ? err
: 0;
static int append_inlines(struct callchain_cursor *cursor,
			  struct map *map, struct symbol *sym, u64 ip)
{
	struct inline_node *inline_node;
	struct inline_list *ilist;
	u64 addr;
	int ret = 1;

	if (!symbol_conf.inline_name || !map || !sym)
		return ret;

	addr = map__map_ip(map, ip);
	addr = map__rip_2objdump(map, addr);

	inline_node = inlines__tree_find(&map->dso->inlined_nodes, addr);
	if (!inline_node) {
		inline_node = dso__parse_addr_inlines(map->dso, addr, sym);
		if (!inline_node)
			return ret;
		inlines__tree_insert(&map->dso->inlined_nodes, inline_node);
	}

	list_for_each_entry(ilist, &inline_node->val, list) {
		ret = callchain_cursor_append(cursor, ip, map,
					      ilist->symbol, false,
					      NULL, 0, 0, 0, ilist->srcline);

		if (ret != 0)
			return ret;
	}

	return ret;
}
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
	struct callchain_cursor *cursor = arg;
	const char *srcline = NULL;
	u64 addr = entry->ip;

	if (symbol_conf.hide_unresolved && entry->sym == NULL)
		return 0;

	if (append_inlines(cursor, entry->map, entry->sym, entry->ip) == 0)
		return 0;

	/*
	 * Convert entry->ip from a virtual address to an offset in
	 * its corresponding binary.
	 */
	if (entry->map)
		addr = map__map_ip(entry->map, entry->ip);

	srcline = callchain_srcline(entry->map, entry->sym, addr);
	return callchain_cursor_append(cursor, entry->ip,
				       entry->map, entry->sym,
				       false, NULL, 0, 0, 0, srcline);
}
static int thread__resolve_callchain_unwind(struct thread *thread,
					    struct callchain_cursor *cursor,
					    struct perf_evsel *evsel,
					    struct perf_sample *sample,
					    int max_stack)
{
	/* Can we do dwarf post unwind? */
	if (!((evsel->attr.sample_type & PERF_SAMPLE_REGS_USER) &&
	      (evsel->attr.sample_type & PERF_SAMPLE_STACK_USER)))
		return 0;

	/* Bail out if nothing was captured. */
	if ((!sample->user_regs.regs) ||
	    (!sample->user_stack.size))
		return 0;

	return unwind__get_entries(unwind_entry, cursor,
				   thread, sample, max_stack);
}
int thread__resolve_callchain(struct thread *thread,
			      struct callchain_cursor *cursor,
			      struct perf_evsel *evsel,
			      struct perf_sample *sample,
			      struct symbol **parent,
			      struct addr_location *root_al,
			      int max_stack)
{
	int ret = 0;

	callchain_cursor_reset(cursor);

	if (callchain_param.order == ORDER_CALLEE) {
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
	} else {
		ret = thread__resolve_callchain_unwind(thread, cursor,
						       evsel, sample,
						       max_stack);
		if (ret)
			return ret;
		ret = thread__resolve_callchain_sample(thread, cursor,
						       evsel, sample,
						       parent, root_al,
						       max_stack);
	}

	return ret;
}
int machine__for_each_thread(struct machine *machine,
			     int (*fn)(struct thread *thread, void *p),
			     void *priv)
{
	struct threads *threads;
	struct rb_node *nd;
	struct thread *thread;
	int rc = 0;
	int i;

	for (i = 0; i < THREADS__TABLE_SIZE; i++) {
		threads = &machine->threads[i];
		for (nd = rb_first_cached(&threads->entries); nd;
		     nd = rb_next(nd)) {
			thread = rb_entry(nd, struct thread, rb_node);
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}

		list_for_each_entry(thread, &threads->dead, node) {
			rc = fn(thread, priv);
			if (rc != 0)
				return rc;
		}
	}
	return rc;
}
int machines__for_each_thread(struct machines *machines,
			      int (*fn)(struct thread *thread, void *p),
			      void *priv)
{
	struct rb_node *nd;
	int rc = 0;

	rc = machine__for_each_thread(&machines->host, fn, priv);
	if (rc != 0)
		return rc;

	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
		struct machine *machine = rb_entry(nd, struct machine, rb_node);

		rc = machine__for_each_thread(machine, fn, priv);
		if (rc != 0)
			return rc;
	}
	return rc;
}
int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct thread_map *threads,
				  perf_event__handler_t process, bool data_mmap,
				  unsigned int nr_threads_synthesize)
{
	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process,
						      machine, data_mmap,
						      nr_threads_synthesize);
	/* command specified */
	return 0;
}
pid_t machine__get_current_tid(struct machine *machine, int cpu)
{
	if (cpu < 0 || cpu >= MAX_NR_CPUS || !machine->current_tid)
		return -1;

	return machine->current_tid[cpu];
}
int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
			     pid_t tid)
{
	struct thread *thread;

	if (!machine->current_tid) {
		int i;

		machine->current_tid = calloc(MAX_NR_CPUS, sizeof(pid_t));
		if (!machine->current_tid)
			return -ENOMEM;
		for (i = 0; i < MAX_NR_CPUS; i++)
			machine->current_tid[i] = -1;
	}

	if (cpu >= MAX_NR_CPUS) {
		pr_err("Requested CPU %d too large. ", cpu);
		pr_err("Consider raising MAX_NR_CPUS\n");
		return -EINVAL;
	}

	machine->current_tid[cpu] = tid;

	thread = machine__findnew_thread(machine, pid, tid);
	if (!thread)
		return -ENOMEM;

	thread->cpu = cpu;
	thread__put(thread);

	return 0;
}
/*
 * Compares the raw arch string. N.B. see instead perf_env__arch() if a
 * normalized arch is needed.
 */
bool machine__is(struct machine *machine, const char *arch)
{
	return machine && !strcmp(perf_env__raw_arch(machine->env), arch);
}
int machine__nr_cpus_avail(struct machine *machine)
{
	return machine ? perf_env__nr_cpus_avail(machine->env) : 0;
}
int machine__get_kernel_start(struct machine *machine)
{
	struct map *map = machine__kernel_map(machine);
	int err = 0;

	/*
	 * The only addresses above 2^63 are kernel addresses of a 64-bit
	 * kernel. Note that addresses are unsigned so that on a 32-bit system
	 * all addresses including kernel addresses are less than 2^32. In
	 * that case (32-bit system), if the kernel mapping is unknown, all
	 * addresses will be assumed to be in user space - see
	 * machine__kernel_ip().
	 */
	machine->kernel_start = 1ULL << 63;
	if (map) {
		err = map__load(map);
		/*
		 * On x86_64, PTI entry trampolines are less than the
		 * start of kernel text, but still above 2^63. So leave
		 * kernel_start = 1ULL << 63 for x86_64.
		 */
		if (!err && !machine__is(machine, "x86_64"))
			machine->kernel_start = map->start;
	}
	return err;
}
u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr)
{
	u8 addr_cpumode = cpumode;
	bool kernel_ip;

	if (!machine->single_address_space)
		goto out;

	kernel_ip = machine__kernel_ip(machine, addr);
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
	case PERF_RECORD_MISC_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL :
					   PERF_RECORD_MISC_USER;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
	case PERF_RECORD_MISC_GUEST_USER:
		addr_cpumode = kernel_ip ? PERF_RECORD_MISC_GUEST_KERNEL :
					   PERF_RECORD_MISC_GUEST_USER;
		break;
	default:
		break;
	}
out:
	return addr_cpumode;
}
struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
{
	return dsos__findnew(&machine->dsos, filename);
}
char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp)
{
	struct machine *machine = vmachine;
	struct map *map;
	struct symbol *sym = machine__find_kernel_symbol(machine, *addrp, &map);

	if (sym == NULL)
		return NULL;

	*modp = __map__is_kmodule(map) ? (char *)map->dso->short_name : NULL;
	*addrp = map->unmap_ip(map, sym->start);
	return sym->name;
}