#include <linux/kernel.h>
#include <traceevent/event-parse.h>

#include "perf_regs.h"
#include "thread-stack.h"
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_tool *tool,
				       u64 file_offset);
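/*
 * Open the perf.data file and sanity-check its header: all evsels must agree
 * on sample_type, sample_id_all and read_format, otherwise per-event
 * demultiplexing would be ambiguous. Pipe input skips the evlist checks.
 */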
static int perf_session__open(struct perf_session *session)
{
	struct perf_data_file *file = session->file;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data_file__is_pipe(file))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}
void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}
int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}
static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}
static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}
static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_sample sample;
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);
	int ret = perf_evlist__parse_sample(session->evlist, event->event, &sample);

	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	return perf_session__deliver_event(session, event->event, &sample,
					   session->tool, event->file_offset);
}
struct perf_session *perf_session__new(struct perf_data_file *file,
				       bool repipe, struct perf_tool *tool)
{
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool   = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events, ordered_events__deliver_event);

	if (file) {
		if (perf_data_file__open(file))
			goto out_delete;

		session->file = file;

		if (perf_data_file__is_read(file)) {
			if (perf_session__open(session) < 0)
				goto out_close;

			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	if (!file || perf_data_file__is_write(file)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	if (tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_close:
	perf_data_file__close(file);
 out_delete:
	perf_session__delete(session);
 out:
	return NULL;
}
static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

void perf_session__delete(struct perf_session *session)
{
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->file)
		perf_data_file__close(session->file);
	free(session);
}
static int process_event_synth_tracing_data_stub(struct perf_tool *tool
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused,
						 struct perf_session *session
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct perf_evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct perf_evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_build_id_stub(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
				 union perf_event *event __maybe_unused,
				 struct perf_session *perf_session
				 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused,
					    union perf_event *event __maybe_unused,
					    struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
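/*
 * Pipes cannot lseek(), so when the stubbed-out auxtrace handler below runs
 * on pipe input it has to consume the in-line AUX data by actually reading
 * it; skipn() does that in bounded chunks.
 */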
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}
static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event,
				       struct perf_session *session
				       __maybe_unused)
{
	dump_printf(": unhandled!\n");
	if (perf_data_file__is_pipe(session->file))
		skipn(perf_data_file__fd(session->file), event->auxtrace.size);
	return event->auxtrace.size;
}

static
int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
				      union perf_event *event __maybe_unused,
				      struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct perf_session *session __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_build_id_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_id_index_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_auxtrace_info_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_auxtrace_error_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
}
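/*
 * Typical usage, sketched for illustration (the sample callback name is
 * hypothetical): a builtin fills in only the callbacks it cares about and
 * relies on the stubs above for the rest, e.g.
 *
 *	struct perf_tool tool = {
 *		.sample		= process_sample_event,
 *		.comm		= perf_event__process_comm,
 *		.ordered_events	= true,
 *	};
 *
 *	perf_tool__fill_defaults(&tool);
 */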
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;
	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}
static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid   = bswap_32(event->mmap.pid);
	event->mmap.tid   = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len   = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid   = bswap_32(event->mmap2.pid);
	event->mmap2.tid   = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len   = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj   = bswap_32(event->mmap2.maj);
	event->mmap2.min   = bswap_32(event->mmap2.min);
	event->mmap2.ino   = bswap_64(event->mmap2.ino);
	event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}
static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid  = bswap_32(event->fork.pid);
	event->fork.tid  = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}
static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid		 = bswap_32(event->read.pid);
	event->read.tid		 = bswap_32(event->read.tid);
	event->read.value	 = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id		 = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}
static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size   = bswap_64(event->aux.aux_size);
	event->aux.flags      = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}
static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}
static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}
static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time	  = bswap_64(event->throttle.time);
	event->throttle.id	  = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}
static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}
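/*
 * Worked example: revbyte() reverses the bit order of one byte in three
 * swap stages (nibbles, then bit pairs, then single bits), e.g. for 0x2c:
 *
 *	00101100 -> 11000010 -> 00111000 -> 00110100	(0x2c -> 0x34)
 */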
/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need proper fix and carry perf_event_attr
 * bitfield flags in separate data file FEAT_ section. Though this seems
 * to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}
/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n) 					\
	(attr->size > (offsetof(struct perf_event_attr, f) + 	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz) 			\
do { 						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *)(&attr->read_format + 1),
			      sizeof(u64));

#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}
static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}
static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}
static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}
static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size      = bswap_64(event->auxtrace.size);
	event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}
static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
}
static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}
typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP]		  = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2]		  = perf_event__mmap2_swap,
	[PERF_RECORD_COMM]		  = perf_event__comm_swap,
	[PERF_RECORD_FORK]		  = perf_event__task_swap,
	[PERF_RECORD_EXIT]		  = perf_event__task_swap,
	[PERF_RECORD_LOST]		  = perf_event__all64_swap,
	[PERF_RECORD_READ]		  = perf_event__read_swap,
	[PERF_RECORD_THROTTLE]		  = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE]	  = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE]		  = perf_event__all64_swap,
	[PERF_RECORD_AUX]		  = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START]	  = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES]	  = perf_event__all64_swap,
	[PERF_RECORD_SWITCH]		  = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE]	  = perf_event__switch_swap,
	[PERF_RECORD_HEADER_ATTR]	  = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE]	  = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID]	  = NULL,
	[PERF_RECORD_ID_INDEX]		  = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO]	  = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE]		  = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR]	  = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP]	  = perf_event__thread_map_swap,
	[PERF_RECORD_HEADER_MAX]	  = NULL,
};
/*
 * When perf record finishes a pass on every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will be all
 * available and then read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}
int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      struct perf_sample *sample, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, sample, file_offset);
}
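/*
 * Events queued here sit in the ordered_events queue until a flush is
 * triggered, either by a PERF_RECORD_FINISHED_ROUND (see the comment above)
 * or by the final flush at end of processing; they are then delivered in
 * timestamp order via ordered_events__deliver_event().
 */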
static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get user call chain,
		 * i is kernel call chain number,
		 * 1 is PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBR are pair registers. The caller is stored
		 * in "from" register, while the callee is stored
		 * in "to" register.
		 * For example, there is a call stack
		 * "A"->"B"->"C"->"D".
		 * The LBR registers will record like
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to construct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), lbr_stack->entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), lbr_stack->entries[i].from);
	}
}
static void callchain__printf(struct perf_evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}
static void branch_stack__printf(struct perf_sample *sample)
{
	uint64_t i;

	printf("... branch stack: nr:%" PRIu64 "\n", sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &sample->branch_stack->entries[i];

		printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			i, e->from, e->to,
			e->flags.cycles,
			e->flags.mispred ? "M" : " ",
			e->flags.predicted ? "P" : " ",
			e->flags.abort ? "A" : " ",
			e->flags.in_tx ? "T" : " ",
			(unsigned)e->flags.reserved);
	}
}
static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}
static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}
static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}
static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}
static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}
static void perf_evlist__print_tstamp(struct perf_evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}
static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}
static void dump_event(struct perf_evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}
static void dump_sample(struct perf_evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->attr.sample_type;

	if (sample_type & PERF_SAMPLE_CALLCHAIN)
		callchain__printf(evsel, sample);

	if ((sample_type & PERF_SAMPLE_BRANCH_STACK) && !has_branch_callstack(evsel))
		branch_stack__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->attr.read_format);
}
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	const u8 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
	struct machine *machine;

	if (perf_guest &&
	    ((cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__find(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}
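/*
 * PERF_SAMPLE_READ delivery: the sample carries raw counter values, so
 * deliver_sample_value() below turns the running total into a period by
 * subtracting the value previously cached in the perf_sample_id, then
 * updates the cache for the next sample.
 */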
static int deliver_sample_value(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);

	if (sid) {
		sample->id     = v->id;
		sample->period = v->value - sid->period;
		sid->period    = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	return tool->sample(tool, event, sample, sid->evsel, machine);
}
static int deliver_sample_group(struct perf_evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}
static int
perf_evlist__deliver_sample(struct perf_evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct perf_evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->attr.sample_type;
	u64 read_format = evsel->attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}
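/*
 * machines__deliver_event() is the central demultiplexer: it resolves the
 * evsel from the sample id and the machine from the cpumode, then dispatches
 * on the record type to the matching tool callback, bumping the relevant
 * statistics along the way.
 */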
static int machines__deliver_event(struct machines *machines,
				   struct perf_evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct perf_evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux &&
		    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED))
			evlist->stats.total_aux_lost += 1;
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}
static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_sample *sample,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	int ret;

	ret = auxtrace__process_event(session, event, sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	return machines__deliver_event(&session->machines, session->evlist,
				       event, sample, tool, file_offset);
}
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data_file__fd(session->file);
	int err;

	dump_event(session->evlist, event, file_offset, NULL);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(tool, event, session);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(tool, event, session);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(tool, event, session);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(tool, event, session);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(tool, event, session);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(tool, event, session);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(tool, event, session);
	default:
		return -EINVAL;
	}
}
int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}
static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}
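/*
 * perf_session__peek_event() reads one event at file_offset without moving
 * the normal processing position: when the whole file is mapped in a single
 * mmap and needs no byte swapping it returns a pointer straight into the
 * mapping, otherwise it reads the event into the caller-supplied buffer.
 */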
int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data_file__is_pipe(session->file))
		return -1;

	fd = perf_data_file__fd(session->file);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	rest = event->header.size - hdr_sz;

	buf += hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}
static s64 perf_session__process_event(struct perf_session *session,
				       union perf_event *event, u64 file_offset)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample;
	int ret;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(evlist));

	if (event->header.type >= PERF_RECORD_HEADER_MAX)
		return -EINVAL;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, file_offset);

	/*
	 * For all kernel events we get the sample data
	 */
	ret = perf_evlist__parse_sample(evlist, event, &sample);
	if (ret)
		return ret;

	if (tool->ordered_events) {
		ret = perf_session__queue_event(session, event, &sample, file_offset);
		if (ret != -ETIME)
			return ret;
	}

	return perf_session__deliver_event(session, event, &sample, tool,
					   file_offset);
}
void perf_event_header__bswap(struct perf_event_header *hdr)
{
	hdr->type = bswap_32(hdr->type);
	hdr->misc = bswap_16(hdr->misc);
	hdr->size = bswap_16(hdr->size);
}
struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
{
	return machine__findnew_thread(&session->machines.host, -1, pid);
}
int perf_session__register_idle_thread(struct perf_session *session)
{
	struct thread *thread;
	int err = 0;

	thread = machine__findnew_thread(&session->machines.host, 0, 0);
	if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
		pr_err("problem inserting idle task.\n");
		err = -1;
	}

	/* machine__findnew_thread() got the thread, so put it */
	thread__put(thread);

	return err;
}
static void perf_session__warn_about_errors(const struct perf_session *session)
{
	const struct events_stats *stats = &session->evlist->stats;
	const struct ordered_events *oe = &session->ordered_events;

	if (session->tool->lost == perf_event__process_lost &&
	    stats->nr_events[PERF_RECORD_LOST] != 0) {
		ui__warning("Processed %d events and lost %d chunks!\n\n"
			    "Check IO/CPU overload!\n\n",
			    stats->nr_events[0],
			    stats->nr_events[PERF_RECORD_LOST]);
	}

	if (session->tool->lost_samples == perf_event__process_lost_samples) {
		double drop_rate;

		drop_rate = (double)stats->total_lost_samples /
			    (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
		if (drop_rate > 0.05) {
			ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
				    stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
				    drop_rate * 100.0);
		}
	}

	if (session->tool->aux == perf_event__process_aux &&
	    stats->total_aux_lost != 0) {
		ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
			    stats->total_aux_lost,
			    stats->nr_events[PERF_RECORD_AUX]);
	}

	if (stats->nr_unknown_events != 0) {
		ui__warning("Found %u unknown events!\n\n"
			    "Is this an older tool processing a perf.data "
			    "file generated by a more recent tool?\n\n"
			    "If that is not the case, consider "
			    "reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_unknown_events);
	}

	if (stats->nr_unknown_id != 0) {
		ui__warning("%u samples with id not present in the header\n",
			    stats->nr_unknown_id);
	}

	if (stats->nr_invalid_chains != 0) {
		ui__warning("Found invalid callchains!\n\n"
			    "%u out of %u events were discarded for this reason.\n\n"
			    "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
			    stats->nr_invalid_chains,
			    stats->nr_events[PERF_RECORD_SAMPLE]);
	}

	if (stats->nr_unprocessable_samples != 0) {
		ui__warning("%u unprocessable samples recorded.\n"
			    "Do you have a KVM guest running and not using 'perf kvm'?\n",
			    stats->nr_unprocessable_samples);
	}

	if (oe->nr_unordered_events != 0)
		ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);

	events_stats__auxtrace_error_warn(stats);

	if (stats->nr_proc_map_timeout != 0) {
		ui__warning("%d map information files for pre-existing threads were\n"
			    "not processed, if there are samples for addresses they\n"
			    "will not be resolved, you may find out which are these\n"
			    "threads by running with -v and redirecting the output\n"
			    "to a file.\n"
			    "The time limit to process proc map is too short?\n"
			    "Increase it by --proc-map-timeout\n",
			    stats->nr_proc_map_timeout);
	}
}
static int perf_session__flush_thread_stack(struct thread *thread,
					    void *p __maybe_unused)
{
	return thread_stack__flush(thread);
}

static int perf_session__flush_thread_stacks(struct perf_session *session)
{
	return machines__for_each_thread(&session->machines,
					 perf_session__flush_thread_stack,
					 NULL);
}
volatile int session_done;
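/*
 * session_done is set asynchronously (typically from a signal handler in the
 * builtins) and polled via session_done() by the processing loops below so
 * that a run can be interrupted cleanly.
 */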
;
1448 static int __perf_session__process_pipe_events(struct perf_session
*session
)
1450 struct ordered_events
*oe
= &session
->ordered_events
;
1451 struct perf_tool
*tool
= session
->tool
;
1452 int fd
= perf_data_file__fd(session
->file
);
1453 union perf_event
*event
;
1454 uint32_t size
, cur_size
= 0;
1461 perf_tool__fill_defaults(tool
);
1464 cur_size
= sizeof(union perf_event
);
1466 buf
= malloc(cur_size
);
1471 err
= readn(fd
, event
, sizeof(struct perf_event_header
));
1476 pr_err("failed to read event header\n");
1480 if (session
->header
.needs_swap
)
1481 perf_event_header__bswap(&event
->header
);
1483 size
= event
->header
.size
;
1484 if (size
< sizeof(struct perf_event_header
)) {
1485 pr_err("bad event header size\n");
1489 if (size
> cur_size
) {
1490 void *new = realloc(buf
, size
);
1492 pr_err("failed to allocate memory to read event\n");
1500 p
+= sizeof(struct perf_event_header
);
1502 if (size
- sizeof(struct perf_event_header
)) {
1503 err
= readn(fd
, p
, size
- sizeof(struct perf_event_header
));
1506 pr_err("unexpected end of event stream\n");
1510 pr_err("failed to read event data\n");
1515 if ((skip
= perf_session__process_event(session
, event
, head
)) < 0) {
1516 pr_err("%#" PRIx64
" [%#x]: failed to process type: %d\n",
1517 head
, event
->header
.size
, event
->header
.type
);
1527 if (!session_done())
1530 /* do the final flush for ordered samples */
1531 err
= ordered_events__flush(oe
, OE_FLUSH__FINAL
);
1534 err
= auxtrace__flush_events(session
, tool
);
1537 err
= perf_session__flush_thread_stacks(session
);
1540 perf_session__warn_about_errors(session
);
1541 ordered_events__free(&session
->ordered_events
);
1542 auxtrace__free_events(session
);
static union perf_event *
fetch_mmaped_event(struct perf_session *session,
		   u64 head, size_t mmap_size, char *buf)
{
	union perf_event *event;

	/*
	 * Ensure we have enough space remaining to read
	 * the size of the event in the headers.
	 */
	if (head + sizeof(event->header) > mmap_size)
		return NULL;

	event = (union perf_event *)(buf + head);

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (head + event->header.size > mmap_size) {
		/* We're not fetching the event so swap back again */
		if (session->header.needs_swap)
			perf_event_header__bswap(&event->header);
		return NULL;
	}

	return event;
}
/*
 * On 64bit we can mmap the data file in one go. No need for tiny mmap
 * slices. On 32bit we use 32MB.
 */
#if BITS_PER_LONG == 64
#define MMAP_SIZE ULLONG_MAX
#define NUM_MMAPS 1
#else
#define MMAP_SIZE (32 * 1024 * 1024ULL)
#define NUM_MMAPS 128
#endif
static int __perf_session__process_events(struct perf_session *session,
					  u64 data_offset, u64 data_size,
					  u64 file_size)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	int fd = perf_data_file__fd(session->file);
	u64 head, page_offset, file_offset, file_pos, size;
	int err, mmap_prot, mmap_flags, map_idx = 0;
	size_t	mmap_size;
	char *buf, *mmaps[NUM_MMAPS];
	union perf_event *event;
	struct ui_progress prog;
	s64 skip;

	perf_tool__fill_defaults(tool);

	page_offset = page_size * (data_offset / page_size);
	file_offset = page_offset;
	head = data_offset - page_offset;

	if (data_size == 0)
		goto out;

	if (data_offset + data_size < file_size)
		file_size = data_offset + data_size;

	ui_progress__init(&prog, file_size, "Processing events...");

	mmap_size = MMAP_SIZE;
	if (mmap_size > file_size) {
		mmap_size = file_size;
		session->one_mmap = true;
	}

	memset(mmaps, 0, sizeof(mmaps));

	mmap_prot  = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (session->header.needs_swap) {
		mmap_prot  |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, fd,
		   file_offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}
	mmaps[map_idx] = buf;
	map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
	file_pos = file_offset + head;
	if (session->one_mmap) {
		session->one_mmap_addr = buf;
		session->one_mmap_offset = file_offset;
	}

more:
	event = fetch_mmaped_event(session, head, mmap_size, buf);
	if (!event) {
		if (mmaps[map_idx]) {
			munmap(mmaps[map_idx], mmap_size);
			mmaps[map_idx] = NULL;
		}

		page_offset = page_size * (head / page_size);
		file_offset += page_offset;
		head -= page_offset;
		goto remap;
	}

	size = event->header.size;

	if (size < sizeof(struct perf_event_header) ||
	    (skip = perf_session__process_event(session, event, file_pos)) < 0) {
		pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
		       file_offset + head, event->header.size,
		       event->header.type);
		err = -EINVAL;
		goto out_err;
	}

	if (skip)
		size += skip;

	head += size;
	file_pos += size;

	ui_progress__update(&prog, size);

	if (session_done())
		goto out;

	if (file_pos < file_size)
		goto more;

out:
	/* do the final flush for ordered samples */
	err = ordered_events__flush(oe, OE_FLUSH__FINAL);
	if (err)
		goto out_err;
	err = auxtrace__flush_events(session, tool);
	if (err)
		goto out_err;
	err = perf_session__flush_thread_stacks(session);
out_err:
	ui_progress__finish();
	perf_session__warn_about_errors(session);
	ordered_events__free(&session->ordered_events);
	auxtrace__free_events(session);
	session->one_mmap = false;
	return err;
}
int perf_session__process_events(struct perf_session *session)
{
	u64 size = perf_data_file__size(session->file);
	int err;

	if (perf_session__register_idle_thread(session) < 0)
		return -ENOMEM;

	if (!perf_data_file__is_pipe(session->file))
		err = __perf_session__process_events(session,
						     session->header.data_offset,
						     session->header.data_size, size);
	else
		err = __perf_session__process_pipe_events(session);

	return err;
}
bool perf_session__has_traces(struct perf_session *session, const char *msg)
{
	struct perf_evsel *evsel;

	evlist__for_each(session->evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT)
			return true;
	}

	pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
	return false;
}
int maps__set_kallsyms_ref_reloc_sym(struct map **maps,
				     const char *symbol_name, u64 addr)
{
	char *bracket;
	int i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);

		if (!kmap)
			continue;
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}
size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
{
	return machines__fprintf_dsos(&session->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
					  bool (skip)(struct dso *dso, int parm), int parm)
{
	return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
}
size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
{
	size_t ret;
	const char *msg = "";

	if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
		msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";

	ret = fprintf(fp, "\nAggregated stats:%s\n", msg);

	ret += events_stats__fprintf(&session->evlist->stats, fp);
	return ret;
}
size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
{
	/*
	 * FIXME: Here we have to actually print all the machines in this
	 * session, not just the host...
	 */
	return machine__fprintf(&session->machines.host, fp);
}
struct perf_evsel *perf_session__find_first_evtype(struct perf_session *session,
						   unsigned int type)
{
	struct perf_evsel *pos;

	evlist__for_each(session->evlist, pos) {
		if (pos->attr.type == type)
			return pos;
	}
	return NULL;
}
void perf_evsel__print_ip(struct perf_evsel *evsel, struct perf_sample *sample,
			  struct addr_location *al,
			  unsigned int print_opts, unsigned int stack_depth)
{
	struct callchain_cursor_node *node;
	int print_ip = print_opts & PRINT_IP_OPT_IP;
	int print_sym = print_opts & PRINT_IP_OPT_SYM;
	int print_dso = print_opts & PRINT_IP_OPT_DSO;
	int print_symoffset = print_opts & PRINT_IP_OPT_SYMOFFSET;
	int print_oneline = print_opts & PRINT_IP_OPT_ONELINE;
	int print_srcline = print_opts & PRINT_IP_OPT_SRCLINE;
	char s = print_oneline ? ' ' : '\t';

	if (symbol_conf.use_callchain && sample->callchain) {
		struct addr_location node_al;

		if (thread__resolve_callchain(al->thread, evsel,
					      sample, NULL, NULL,
					      stack_depth) != 0) {
			if (verbose)
				error("Failed to resolve callchain. Skipping\n");
			return;
		}
		callchain_cursor_commit(&callchain_cursor);

		if (print_symoffset)
			node_al = *al;

		while (stack_depth) {
			u64 addr = 0;

			node = callchain_cursor_current(&callchain_cursor);
			if (!node)
				break;

			if (node->sym && node->sym->ignore)
				goto next;

			if (print_ip)
				printf("%c%16" PRIx64, s, node->ip);

			if (node->map)
				addr = node->map->map_ip(node->map, node->ip);

			if (print_sym) {
				printf(" ");
				if (print_symoffset) {
					node_al.addr = addr;
					node_al.map  = node->map;
					symbol__fprintf_symname_offs(node->sym, &node_al, stdout);
				} else
					symbol__fprintf_symname(node->sym, stdout);
			}

			if (print_dso) {
				printf(" (");
				map__fprintf_dsoname(node->map, stdout);
				printf(")");
			}

			if (print_srcline)
				map__fprintf_srcline(node->map, addr, "\n  ",
						     stdout);

			if (!print_oneline)
				printf("\n");

			stack_depth--;
next:
			callchain_cursor_advance(&callchain_cursor);
		}

	} else {
		if (al->sym && al->sym->ignore)
			return;

		if (print_ip)
			printf("%16" PRIx64, sample->ip);

		if (print_sym) {
			printf(" ");
			if (print_symoffset)
				symbol__fprintf_symname_offs(al->sym, al,
							     stdout);
			else
				symbol__fprintf_symname(al->sym, stdout);
		}

		if (print_dso) {
			printf(" (");
			map__fprintf_dsoname(al->map, stdout);
			printf(")");
		}

		if (print_srcline)
			map__fprintf_srcline(al->map, al->addr, "\n  ", stdout);
	}
}
int perf_session__cpu_bitmap(struct perf_session *session,
			     const char *cpu_list, unsigned long *cpu_bitmap)
{
	int i, err = -1;
	struct cpu_map *map;

	for (i = 0; i < PERF_TYPE_MAX; ++i) {
		struct perf_evsel *evsel;

		evsel = perf_session__find_first_evtype(session, i);
		if (!evsel)
			continue;

		if (!(evsel->attr.sample_type & PERF_SAMPLE_CPU)) {
			pr_err("File does not contain CPU events. "
			       "Remove -c option to proceed.\n");
			return -1;
		}
	}

	map = cpu_map__new(cpu_list);
	if (map == NULL) {
		pr_err("Invalid cpu_list\n");
		return -1;
	}

	for (i = 0; i < map->nr; i++) {
		int cpu = map->map[i];

		if (cpu >= MAX_NR_CPUS) {
			pr_err("Requested CPU %d too large. "
			       "Consider raising MAX_NR_CPUS\n", cpu);
			goto out_delete_map;
		}

		set_bit(cpu, cpu_bitmap);
	}

	err = 0;

out_delete_map:
	cpu_map__put(map);
	return err;
}
*session
, FILE *fp
,
1964 if (session
== NULL
|| fp
== NULL
)
1967 fd
= perf_data_file__fd(session
->file
);
1969 ret
= fstat(fd
, &st
);
1973 fprintf(fp
, "# ========\n");
1974 fprintf(fp
, "# captured on: %s", ctime(&st
.st_ctime
));
1975 perf_header__fprintf_info(session
, fp
, full
);
1976 fprintf(fp
, "# ========\n#\n");
int __perf_session__set_tracepoints_handlers(struct perf_session *session,
					     const struct perf_evsel_str_handler *assocs,
					     size_t nr_assocs)
{
	struct perf_evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/*
		 * Adding a handler for an event not in the session,
		 * just ignore it.
		 */
		evsel = perf_evlist__find_tracepoint_by_name(session->evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}
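/*
 * Illustrative usage, sketched (the tracepoint handler name is hypothetical):
 * builtins normally call this through the
 * perf_session__set_tracepoints_handlers() wrapper macro with a static
 * table, e.g.
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch_event },
 *	};
 */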
int perf_event__process_id_index(struct perf_tool *tool __maybe_unused,
				 union perf_event *event,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct id_index_event *ie = &event->id_index;
	size_t i, nr, max_nr;

	max_nr = (ie->header.size - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);
	nr = ie->nr;
	if (nr > max_nr)
		return -EINVAL;

	if (dump_trace)
		fprintf(stdout, " nr: %zu\n", nr);

	for (i = 0; i < nr; i++) {
		struct id_index_entry *e = &ie->entries[i];
		struct perf_sample_id *sid;

		if (dump_trace) {
			fprintf(stdout, " ... id: %"PRIu64, e->id);
			fprintf(stdout, "  idx: %"PRIu64, e->idx);
			fprintf(stdout, "  cpu: %"PRId64, e->cpu);
			fprintf(stdout, "  tid: %"PRId64"\n", e->tid);
		}

		sid = perf_evlist__id2sid(evlist, e->id);
		if (!sid)
			return -ENOENT;
		sid->idx = e->idx;
		sid->cpu = e->cpu;
		sid->tid = e->tid;
	}
	return 0;
}
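/*
 * Because a perf_event_header carries a u16 size, one PERF_RECORD_ID_INDEX
 * event can hold at most max_nr entries; the synthesizer below therefore
 * emits the index in chunks, flushing a full event before starting the next.
 */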
int perf_event__synthesize_id_index(struct perf_tool *tool,
				    perf_event__handler_t process,
				    struct perf_evlist *evlist,
				    struct machine *machine)
{
	union perf_event *ev;
	struct perf_evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n;
	int err;

	pr_debug2("Synthesizing id index\n");

	max_nr = (UINT16_MAX - sizeof(struct id_index_event)) /
		 sizeof(struct id_index_entry);

	evlist__for_each(evlist, evsel)
		nr += evsel->ids;

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct id_index_event) + n * sizeof(struct id_index_entry);
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.header.size = sz;
	ev->id_index.nr = n;

	evlist__for_each(evlist, evsel) {
		u32 j;

		for (j = 0; j < evsel->ids; j++) {
			struct id_index_entry *e;
			struct perf_sample_id *sid;

			if (i >= n) {
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
			}

			e = &ev->id_index.entries[i++];

			e->id = evsel->id[j];

			sid = perf_evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu;
			e->tid = sid->tid;
		}
	}

	sz = sizeof(struct id_index_event) + nr * sizeof(struct id_index_entry);
	ev->id_index.header.size = sz;
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}