// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "ui/progress.h"
#include "../perf.h"
#include "arch/common.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
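/*
 * Handle a PERF_RECORD_COMPRESSED event: decompress the zstd payload into
 * an anonymous mmap'd buffer and link it onto the session's decomp chain.
 * Any not-yet-consumed tail of the previous buffer is copied to the front
 * of the new one, so that events split across compressed records can still
 * be parsed.
 */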
static int perf_session__process_compressed_event(struct perf_session *session,
						  union perf_event *event, u64 file_offset)
{
	void *src;
	size_t decomp_size, src_size;
	u64 decomp_last_rem = 0;
	size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
	struct decomp *decomp, *decomp_last = session->decomp_last;

	if (decomp_last) {
		decomp_last_rem = decomp_last->size - decomp_last->head;
		decomp_len += decomp_last_rem;
	}

	mmap_len = sizeof(struct decomp) + decomp_len;
	decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
		      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
	if (decomp == MAP_FAILED) {
		pr_err("Couldn't allocate memory for decompression\n");
		return -1;
	}

	decomp->file_pos = file_offset;
	decomp->mmap_len = mmap_len;
	decomp->head = 0;

	if (decomp_last_rem) {
		memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
		decomp->size = decomp_last_rem;
	}

	src = (void *)event + sizeof(struct perf_record_compressed);
	src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

	decomp_size = zstd_decompress_stream(&(session->zstd_data), src, src_size,
				&(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
	if (!decomp_size) {
		munmap(decomp, mmap_len);
		pr_err("Couldn't decompress data\n");
		return -1;
	}

	decomp->size += decomp_size;

	if (session->decomp == NULL) {
		session->decomp = decomp;
		session->decomp_last = decomp;
	} else {
		session->decomp_last->next = decomp;
		session->decomp_last = decomp;
	}

	pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);

	return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset);

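/*
 * Read and validate the perf.data header: all evsels must agree on
 * sample_type, sample_id_all and read_format. Pipe data and files with
 * the HEADER_STAT feature carry no such guarantees and are accepted as-is.
 */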
static int perf_session__open(struct perf_session *session)
{
	struct perf_data *data = session->data;

	if (perf_session__read_header(session) < 0) {
		pr_err("incompatible file format (rerun with -v to learn more)\n");
		return -1;
	}

	if (perf_data__is_pipe(data))
		return 0;

	if (perf_header__has_feat(&session->header, HEADER_STAT))
		return 0;

	if (!perf_evlist__valid_sample_type(session->evlist)) {
		pr_err("non matching sample_type\n");
		return -1;
	}

	if (!perf_evlist__valid_sample_id_all(session->evlist)) {
		pr_err("non matching sample_id_all\n");
		return -1;
	}

	if (!perf_evlist__valid_read_format(session->evlist)) {
		pr_err("non matching read_format\n");
		return -1;
	}

	return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
	u16 id_hdr_size = perf_evlist__id_hdr_size(session->evlist);

	machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
	int ret = machine__create_kernel_maps(&session->machines.host);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&session->machines);
	return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
	machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
	struct evsel *evsel;

	evlist__for_each_entry(session->evlist, evsel) {
		if (evsel->core.attr.comm_exec)
			return true;
	}

	return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
	bool comm_exec = perf_session__has_comm_exec(session);

	machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
					 struct ordered_event *event)
{
	struct perf_session *session = container_of(oe, struct perf_session,
						    ordered_events);

	return perf_session__deliver_event(session, event->event,
					   session->tool, event->file_offset);
}

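/*
 * Allocate and initialize a session. When @data is readable, the on-disk
 * or piped header is parsed here; when it is writable or absent, kernel
 * maps are created up front so synthesized events can be resolved.
 */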
struct perf_session *perf_session__new(struct perf_data *data,
				       bool repipe, struct perf_tool *tool)
{
	int ret = -ENOMEM;
	struct perf_session *session = zalloc(sizeof(*session));

	if (!session)
		goto out;

	session->repipe = repipe;
	session->tool = tool;
	INIT_LIST_HEAD(&session->auxtrace_index);
	machines__init(&session->machines);
	ordered_events__init(&session->ordered_events,
			     ordered_events__deliver_event, NULL);

	perf_env__init(&session->header.env);
	if (data) {
		ret = perf_data__open(data);
		if (ret < 0)
			goto out_delete;

		session->data = data;

		if (perf_data__is_read(data)) {
			ret = perf_session__open(session);
			if (ret < 0)
				goto out_delete;

			/*
			 * set session attributes that are present in perf.data
			 * but not in pipe-mode.
			 */
			if (!data->is_pipe) {
				perf_session__set_id_hdr_size(session);
				perf_session__set_comm_exec(session);
			}

			perf_evlist__init_trace_event_sample_raw(session->evlist);

			/* Open the directory data. */
			if (data->is_dir) {
				ret = perf_data__open_dir(data);
				if (ret)
					goto out_delete;
			}

			if (!symbol_conf.kallsyms_name &&
			    !symbol_conf.vmlinux_name)
				symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
		}
	} else {
		session->machines.host.env = &perf_env;
	}

	session->machines.host.single_address_space =
		perf_env__single_address_space(session->machines.host.env);

	if (!data || perf_data__is_write(data)) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in perf_event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(session) < 0)
			pr_warning("Cannot read kernel map\n");
	}

	/*
	 * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
	 * processed, so perf_evlist__sample_id_all is not meaningful here.
	 */
	if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
	    tool->ordered_events && !perf_evlist__sample_id_all(session->evlist)) {
		dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
		tool->ordered_events = false;
	}

	return session;

 out_delete:
	perf_session__delete(session);
 out:
	return ERR_PTR(ret);
}

static void perf_session__delete_threads(struct perf_session *session)
{
	machine__delete_threads(&session->machines.host);
}

static void perf_session__release_decomp_events(struct perf_session *session)
{
	struct decomp *next, *decomp;
	size_t mmap_len;

	next = session->decomp;
	do {
		decomp = next;
		if (decomp == NULL)
			break;
		next = decomp->next;
		mmap_len = decomp->mmap_len;
		munmap(decomp, mmap_len);
	} while (1);
}

void perf_session__delete(struct perf_session *session)
{
	if (session == NULL)
		return;
	auxtrace__free(session);
	auxtrace_index__free(&session->auxtrace_index);
	perf_session__destroy_kernel_maps(session);
	perf_session__delete_threads(session);
	perf_session__release_decomp_events(session);
	perf_env__exit(&session->header.env);
	machines__exit(&session->machines);
	if (session->data)
		perf_data__close(session->data);
	free(session);
}

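/*
 * Default do-nothing handlers. perf_tool__fill_defaults() installs these
 * for every callback a tool does not provide.
 */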
static int process_event_synth_tracing_data_stub(struct perf_session *session
						 __maybe_unused,
						 union perf_event *event
						 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
					 union perf_event *event __maybe_unused,
					 struct evlist **pevlist
					 __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
						 union perf_event *event __maybe_unused,
						 struct evlist **pevlist
						 __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_event_update(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
				     union perf_event *event __maybe_unused,
				     struct perf_sample *sample __maybe_unused,
				     struct evsel *evsel __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
			      union perf_event *event __maybe_unused,
			      struct perf_sample *sample __maybe_unused,
			      struct machine *machine __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
				       union perf_event *event __maybe_unused,
				       struct ordered_events *oe __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(struct perf_tool *tool,
				  union perf_event *event,
				  struct ordered_events *oe);

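/* Consume and discard n bytes from fd; used for non-seekable (pipe) input. */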
static int skipn(int fd, off_t n)
{
	char buf[4096];
	ssize_t ret;

	while (n > 0) {
		ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
		if (ret <= 0)
			return ret;
		n -= ret;
	}

	return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
				       union perf_event *event)
{
	dump_printf(": unhandled!\n");
	if (perf_data__is_pipe(session->data))
		skipn(perf_data__fd(session->data), event->auxtrace.size);
	return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
				  union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_thread_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
			       union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_cpu_map(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
				   union perf_event *event __maybe_unused)
{
	if (dump_trace)
		perf_event__fprintf_stat_config(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
			     union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
				   union perf_event *event)
{
	if (dump_trace)
		perf_event__fprintf_stat_round(event, stdout);

	dump_printf(": unhandled!\n");
	return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
						       union perf_event *event __maybe_unused,
						       u64 file_offset __maybe_unused)
{
	dump_printf(": unhandled!\n");
	return 0;
}

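/* Plug every callback a tool did not set with its default or stub handler. */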
void perf_tool__fill_defaults(struct perf_tool *tool)
{
	if (tool->sample == NULL)
		tool->sample = process_event_sample_stub;
	if (tool->mmap == NULL)
		tool->mmap = process_event_stub;
	if (tool->mmap2 == NULL)
		tool->mmap2 = process_event_stub;
	if (tool->comm == NULL)
		tool->comm = process_event_stub;
	if (tool->namespaces == NULL)
		tool->namespaces = process_event_stub;
	if (tool->cgroup == NULL)
		tool->cgroup = process_event_stub;
	if (tool->fork == NULL)
		tool->fork = process_event_stub;
	if (tool->exit == NULL)
		tool->exit = process_event_stub;
	if (tool->lost == NULL)
		tool->lost = perf_event__process_lost;
	if (tool->lost_samples == NULL)
		tool->lost_samples = perf_event__process_lost_samples;
	if (tool->aux == NULL)
		tool->aux = perf_event__process_aux;
	if (tool->itrace_start == NULL)
		tool->itrace_start = perf_event__process_itrace_start;
	if (tool->context_switch == NULL)
		tool->context_switch = perf_event__process_switch;
	if (tool->ksymbol == NULL)
		tool->ksymbol = perf_event__process_ksymbol;
	if (tool->bpf == NULL)
		tool->bpf = perf_event__process_bpf;
	if (tool->read == NULL)
		tool->read = process_event_sample_stub;
	if (tool->throttle == NULL)
		tool->throttle = process_event_stub;
	if (tool->unthrottle == NULL)
		tool->unthrottle = process_event_stub;
	if (tool->attr == NULL)
		tool->attr = process_event_synth_attr_stub;
	if (tool->event_update == NULL)
		tool->event_update = process_event_synth_event_update_stub;
	if (tool->tracing_data == NULL)
		tool->tracing_data = process_event_synth_tracing_data_stub;
	if (tool->build_id == NULL)
		tool->build_id = process_event_op2_stub;
	if (tool->finished_round == NULL) {
		if (tool->ordered_events)
			tool->finished_round = process_finished_round;
		else
			tool->finished_round = process_finished_round_stub;
	}
	if (tool->id_index == NULL)
		tool->id_index = process_event_op2_stub;
	if (tool->auxtrace_info == NULL)
		tool->auxtrace_info = process_event_op2_stub;
	if (tool->auxtrace == NULL)
		tool->auxtrace = process_event_auxtrace_stub;
	if (tool->auxtrace_error == NULL)
		tool->auxtrace_error = process_event_op2_stub;
	if (tool->thread_map == NULL)
		tool->thread_map = process_event_thread_map_stub;
	if (tool->cpu_map == NULL)
		tool->cpu_map = process_event_cpu_map_stub;
	if (tool->stat_config == NULL)
		tool->stat_config = process_event_stat_config_stub;
	if (tool->stat == NULL)
		tool->stat = process_stat_stub;
	if (tool->stat_round == NULL)
		tool->stat_round = process_stat_round_stub;
	if (tool->time_conv == NULL)
		tool->time_conv = process_event_op2_stub;
	if (tool->feature == NULL)
		tool->feature = process_event_op2_stub;
	if (tool->compressed == NULL)
		tool->compressed = perf_session__process_compressed_event;
}

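/*
 * Byte-swapping helpers for perf.data files recorded on a machine of the
 * opposite endianness. Each handler swaps the type-specific fields and,
 * when sample_id_all is set, the trailing sample id block.
 */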
static void swap_sample_id_all(union perf_event *event, void *data)
{
	void *end = (void *) event + event->header.size;
	int size = end - data;

	BUG_ON(size % sizeof(u64));
	mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
				   bool sample_id_all __maybe_unused)
{
	struct perf_event_header *hdr = &event->header;

	mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
	event->comm.pid = bswap_32(event->comm.pid);
	event->comm.tid = bswap_32(event->comm.tid);

	if (sample_id_all) {
		void *data = &event->comm.comm;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap_swap(union perf_event *event,
				  bool sample_id_all)
{
	event->mmap.pid = bswap_32(event->mmap.pid);
	event->mmap.tid = bswap_32(event->mmap.tid);
	event->mmap.start = bswap_64(event->mmap.start);
	event->mmap.len = bswap_64(event->mmap.len);
	event->mmap.pgoff = bswap_64(event->mmap.pgoff);

	if (sample_id_all) {
		void *data = &event->mmap.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__mmap2_swap(union perf_event *event,
				   bool sample_id_all)
{
	event->mmap2.pid = bswap_32(event->mmap2.pid);
	event->mmap2.tid = bswap_32(event->mmap2.tid);
	event->mmap2.start = bswap_64(event->mmap2.start);
	event->mmap2.len = bswap_64(event->mmap2.len);
	event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
	event->mmap2.maj = bswap_32(event->mmap2.maj);
	event->mmap2.min = bswap_32(event->mmap2.min);
	event->mmap2.ino = bswap_64(event->mmap2.ino);

	if (sample_id_all) {
		void *data = &event->mmap2.filename;

		data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
		swap_sample_id_all(event, data);
	}
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
	event->fork.pid = bswap_32(event->fork.pid);
	event->fork.tid = bswap_32(event->fork.tid);
	event->fork.ppid = bswap_32(event->fork.ppid);
	event->fork.ptid = bswap_32(event->fork.ptid);
	event->fork.time = bswap_64(event->fork.time);

	if (sample_id_all)
		swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
	event->read.pid = bswap_32(event->read.pid);
	event->read.tid = bswap_32(event->read.tid);
	event->read.value = bswap_64(event->read.value);
	event->read.time_enabled = bswap_64(event->read.time_enabled);
	event->read.time_running = bswap_64(event->read.time_running);
	event->read.id = bswap_64(event->read.id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
	event->aux.aux_offset = bswap_64(event->aux.aux_offset);
	event->aux.aux_size = bswap_64(event->aux.aux_size);
	event->aux.flags = bswap_64(event->aux.flags);

	if (sample_id_all)
		swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
					  bool sample_id_all)
{
	event->itrace_start.pid = bswap_32(event->itrace_start.pid);
	event->itrace_start.tid = bswap_32(event->itrace_start.tid);

	if (sample_id_all)
		swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
	if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
		event->context_switch.next_prev_pid =
				bswap_32(event->context_switch.next_prev_pid);
		event->context_switch.next_prev_tid =
				bswap_32(event->context_switch.next_prev_tid);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__throttle_swap(union perf_event *event,
				      bool sample_id_all)
{
	event->throttle.time = bswap_64(event->throttle.time);
	event->throttle.id = bswap_64(event->throttle.id);
	event->throttle.stream_id = bswap_64(event->throttle.stream_id);

	if (sample_id_all)
		swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
					bool sample_id_all)
{
	u64 i;

	event->namespaces.pid = bswap_32(event->namespaces.pid);
	event->namespaces.tid = bswap_32(event->namespaces.tid);
	event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

	for (i = 0; i < event->namespaces.nr_namespaces; i++) {
		struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

		ns->dev = bswap_64(ns->dev);
		ns->ino = bswap_64(ns->ino);
	}

	if (sample_id_all)
		swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static u8 revbyte(u8 b)
{
	int rev = (b >> 4) | ((b & 0xf) << 4);
	rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
	rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
	return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above is byte specific, so we need to reverse each byte of the
 * bitfield. 'Internet' also says this might be implementation specific;
 * the proper fix would be to carry the perf_event_attr bitfield flags in
 * a separate data file FEAT_ section. Though this seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
	unsigned i;

	for (i = 0; i < len; i++) {
		*p = revbyte(*p);
		p++;
	}
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
	attr->type = bswap_32(attr->type);
	attr->size = bswap_32(attr->size);

#define bswap_safe(f, n)					\
	(attr->size > (offsetof(struct perf_event_attr, f) +	\
		       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)			\
do {						\
	if (bswap_safe(f, 0))			\
		attr->f = bswap_##sz(attr->f);	\
} while(0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

	bswap_field_64(config);
	bswap_field_64(sample_period);
	bswap_field_64(sample_type);
	bswap_field_64(read_format);
	bswap_field_32(wakeup_events);
	bswap_field_32(bp_type);
	bswap_field_64(bp_addr);
	bswap_field_64(bp_len);
	bswap_field_64(branch_sample_type);
	bswap_field_64(sample_regs_user);
	bswap_field_32(sample_stack_user);
	bswap_field_32(aux_watermark);
	bswap_field_16(sample_max_stack);
	bswap_field_32(aux_sample_size);

	/*
	 * After read_format are bitfields. Check read_format because
	 * we are unable to use offsetof on bitfield.
	 */
	if (bswap_safe(read_format, 1))
		swap_bitfield((u8 *) (&attr->read_format + 1),
			      sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	size_t size;

	perf_event__attr_swap(&event->attr.attr);

	size = event->header.size;
	size -= (void *)&event->attr.id - (void *)event;
	mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->event_update.type = bswap_64(event->event_update.type);
	event->event_update.id = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->event_type.event_type.event_id =
		bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
					  bool sample_id_all __maybe_unused)
{
	event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
					   bool sample_id_all __maybe_unused)
{
	size_t size;

	event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

	size = event->header.size;
	size -= (void *)&event->auxtrace_info.priv - (void *)event;
	mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
				      bool sample_id_all __maybe_unused)
{
	event->auxtrace.size = bswap_64(event->auxtrace.size);
	event->auxtrace.offset = bswap_64(event->auxtrace.offset);
	event->auxtrace.reference = bswap_64(event->auxtrace.reference);
	event->auxtrace.idx = bswap_32(event->auxtrace.idx);
	event->auxtrace.tid = bswap_32(event->auxtrace.tid);
	event->auxtrace.cpu = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
					    bool sample_id_all __maybe_unused)
{
	event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
	event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
	event->auxtrace_error.cpu = bswap_32(event->auxtrace_error.cpu);
	event->auxtrace_error.pid = bswap_32(event->auxtrace_error.pid);
	event->auxtrace_error.tid = bswap_32(event->auxtrace_error.tid);
	event->auxtrace_error.fmt = bswap_32(event->auxtrace_error.fmt);
	event->auxtrace_error.ip = bswap_64(event->auxtrace_error.ip);
	if (event->auxtrace_error.fmt)
		event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
}

static void perf_event__thread_map_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	unsigned i;

	event->thread_map.nr = bswap_64(event->thread_map.nr);

	for (i = 0; i < event->thread_map.nr; i++)
		event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
				     bool sample_id_all __maybe_unused)
{
	struct perf_record_cpu_map_data *data = &event->cpu_map.data;
	struct cpu_map_entries *cpus;
	struct perf_record_record_cpu_map *mask;
	unsigned i;

	data->type = bswap_64(data->type);

	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		cpus = (struct cpu_map_entries *)data->data;

		cpus->nr = bswap_16(cpus->nr);

		for (i = 0; i < cpus->nr; i++)
			cpus->cpu[i] = bswap_16(cpus->cpu[i]);
		break;
	case PERF_CPU_MAP__MASK:
		mask = (struct perf_record_record_cpu_map *)data->data;

		mask->nr = bswap_16(mask->nr);
		mask->long_size = bswap_16(mask->long_size);

		switch (mask->long_size) {
		case 4: mem_bswap_32(&mask->mask, mask->nr); break;
		case 8: mem_bswap_64(&mask->mask, mask->nr); break;
		default:
			pr_err("cpu_map swap: unsupported long size\n");
		}
	default:
		break;
	}
}

static void perf_event__stat_config_swap(union perf_event *event,
					 bool sample_id_all __maybe_unused)
{
	u64 size;

	size = event->stat_config.nr * sizeof(event->stat_config.data[0]);
	size += 1; /* nr item itself */
	mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
				  bool sample_id_all __maybe_unused)
{
	event->stat.id = bswap_64(event->stat.id);
	event->stat.thread = bswap_32(event->stat.thread);
	event->stat.cpu = bswap_32(event->stat.cpu);
	event->stat.val = bswap_64(event->stat.val);
	event->stat.ena = bswap_64(event->stat.ena);
	event->stat.run = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
					bool sample_id_all __maybe_unused)
{
	event->stat_round.type = bswap_64(event->stat_round.type);
	event->stat_round.time = bswap_64(event->stat_round.time);
}

typedef void (*perf_event__swap_op)(union perf_event *event,
				    bool sample_id_all);

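/* Per-record-type swap handlers, indexed by the perf_event header type. */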
static perf_event__swap_op perf_event__swap_ops[] = {
	[PERF_RECORD_MMAP] = perf_event__mmap_swap,
	[PERF_RECORD_MMAP2] = perf_event__mmap2_swap,
	[PERF_RECORD_COMM] = perf_event__comm_swap,
	[PERF_RECORD_FORK] = perf_event__task_swap,
	[PERF_RECORD_EXIT] = perf_event__task_swap,
	[PERF_RECORD_LOST] = perf_event__all64_swap,
	[PERF_RECORD_READ] = perf_event__read_swap,
	[PERF_RECORD_THROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_UNTHROTTLE] = perf_event__throttle_swap,
	[PERF_RECORD_SAMPLE] = perf_event__all64_swap,
	[PERF_RECORD_AUX] = perf_event__aux_swap,
	[PERF_RECORD_ITRACE_START] = perf_event__itrace_start_swap,
	[PERF_RECORD_LOST_SAMPLES] = perf_event__all64_swap,
	[PERF_RECORD_SWITCH] = perf_event__switch_swap,
	[PERF_RECORD_SWITCH_CPU_WIDE] = perf_event__switch_swap,
	[PERF_RECORD_NAMESPACES] = perf_event__namespaces_swap,
	[PERF_RECORD_HEADER_ATTR] = perf_event__hdr_attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = perf_event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_ID_INDEX] = perf_event__all64_swap,
	[PERF_RECORD_AUXTRACE_INFO] = perf_event__auxtrace_info_swap,
	[PERF_RECORD_AUXTRACE] = perf_event__auxtrace_swap,
	[PERF_RECORD_AUXTRACE_ERROR] = perf_event__auxtrace_error_swap,
	[PERF_RECORD_THREAD_MAP] = perf_event__thread_map_swap,
	[PERF_RECORD_CPU_MAP] = perf_event__cpu_map_swap,
	[PERF_RECORD_STAT_CONFIG] = perf_event__stat_config_swap,
	[PERF_RECORD_STAT] = perf_event__stat_swap,
	[PERF_RECORD_STAT_ROUND] = perf_event__stat_round_swap,
	[PERF_RECORD_EVENT_UPDATE] = perf_event__event_update_swap,
	[PERF_RECORD_TIME_CONV] = perf_event__all64_swap,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
static int process_finished_round(struct perf_tool *tool __maybe_unused,
				  union perf_event *event __maybe_unused,
				  struct ordered_events *oe)
{
	if (dump_trace)
		fprintf(stdout, "\n");
	return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
			      u64 timestamp, u64 file_offset)
{
	return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
	struct ip_callchain *callchain = sample->callchain;
	struct branch_stack *lbr_stack = sample->branch_stack;
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	u64 kernel_callchain_nr = callchain->nr;
	unsigned int i;

	for (i = 0; i < kernel_callchain_nr; i++) {
		if (callchain->ips[i] == PERF_CONTEXT_USER)
			break;
	}

	if ((i != kernel_callchain_nr) && lbr_stack->nr) {
		u64 total_nr;
		/*
		 * LBR callstack can only get the user call chain;
		 * i is the kernel call chain length,
		 * 1 is for PERF_CONTEXT_USER.
		 *
		 * The user call chain is stored in LBR registers.
		 * LBRs are register pairs: the caller is stored
		 * in the "from" register, while the callee is stored
		 * in the "to" register.
		 * For example, given the call stack
		 * "A"->"B"->"C"->"D",
		 * the LBR registers will record
		 * "C"->"D", "B"->"C", "A"->"B".
		 * So only the first "to" register and all "from"
		 * registers are needed to reconstruct the whole stack.
		 */
		total_nr = i + 1 + lbr_stack->nr + 1;
		kernel_callchain_nr = i + 1;

		printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

		for (i = 0; i < kernel_callchain_nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       i, callchain->ips[i]);

		printf("..... %2d: %016" PRIx64 "\n",
		       (int)(kernel_callchain_nr), entries[0].to);
		for (i = 0; i < lbr_stack->nr; i++)
			printf("..... %2d: %016" PRIx64 "\n",
			       (int)(i + kernel_callchain_nr + 1), entries[i].from);
	}
}

static void callchain__printf(struct evsel *evsel,
			      struct perf_sample *sample)
{
	unsigned int i;
	struct ip_callchain *callchain = sample->callchain;

	if (evsel__has_branch_callstack(evsel))
		callchain__lbr_callstack_printf(sample);

	printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

	for (i = 0; i < callchain->nr; i++)
		printf("..... %2d: %016" PRIx64 "\n",
		       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
	struct branch_entry *entries = perf_sample__branch_entries(sample);
	uint64_t i;

	printf("%s: nr:%" PRIu64 "\n",
	       !callstack ? "... branch stack" : "... branch callstack",
	       sample->branch_stack->nr);

	for (i = 0; i < sample->branch_stack->nr; i++) {
		struct branch_entry *e = &entries[i];

		if (!callstack) {
			printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x\n",
			       i, e->from, e->to,
			       (unsigned short)e->flags.cycles,
			       e->flags.mispred ? "M" : " ",
			       e->flags.predicted ? "P" : " ",
			       e->flags.abort ? "A" : " ",
			       e->flags.in_tx ? "T" : " ",
			       (unsigned)e->flags.reserved);
		} else {
			printf("..... %2"PRIu64": %016" PRIx64 "\n",
			       i, i > 0 ? e->from : e->to);
		}
	}
}

static void regs_dump__printf(u64 mask, u64 *regs)
{
	unsigned rid, i = 0;

	for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
		u64 val = regs[i++];

		printf(".... %-5s 0x%" PRIx64 "\n",
		       perf_reg_name(rid), val);
	}
}

static const char *regs_abi[] = {
	[PERF_SAMPLE_REGS_ABI_NONE] = "none",
	[PERF_SAMPLE_REGS_ABI_32] = "32-bit",
	[PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
	if (d->abi > PERF_SAMPLE_REGS_ABI_64)
		return "unknown";

	return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs)
{
	u64 mask = regs->mask;

	printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
	       type,
	       mask,
	       regs_dump_abi(regs));

	regs_dump__printf(mask, regs->regs);
}

static void regs_user__printf(struct perf_sample *sample)
{
	struct regs_dump *user_regs = &sample->user_regs;

	if (user_regs->regs)
		regs__printf("user", user_regs);
}

static void regs_intr__printf(struct perf_sample *sample)
{
	struct regs_dump *intr_regs = &sample->intr_regs;

	if (intr_regs->regs)
		regs__printf("intr", intr_regs);
}

static void stack_user__printf(struct stack_dump *dump)
{
	printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
	       dump->size, dump->offset);
}

static void perf_evlist__print_tstamp(struct evlist *evlist,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	u64 sample_type = __perf_evlist__combined_sample_type(evlist);

	if (event->header.type != PERF_RECORD_SAMPLE &&
	    !perf_evlist__sample_id_all(evlist)) {
		fputs("-1 -1 ", stdout);
		return;
	}

	if ((sample_type & PERF_SAMPLE_CPU))
		printf("%u ", sample->cpu);

	if (sample_type & PERF_SAMPLE_TIME)
		printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
	printf("... sample_read:\n");

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("...... time enabled %016" PRIx64 "\n",
		       sample->read.time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("...... time running %016" PRIx64 "\n",
		       sample->read.time_running);

	if (read_format & PERF_FORMAT_GROUP) {
		u64 i;

		printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

		for (i = 0; i < sample->read.group.nr; i++) {
			struct sample_read_value *value;

			value = &sample->read.group.values[i];
			printf("..... id %016" PRIx64
			       ", value %016" PRIx64 "\n",
			       value->id, value->value);
		}
	} else
		printf("..... id %016" PRIx64 ", value %016" PRIx64 "\n",
		       sample->read.one.id, sample->read.one.value);
}

static void dump_event(struct evlist *evlist, union perf_event *event,
		       u64 file_offset, struct perf_sample *sample)
{
	if (!dump_trace)
		return;

	printf("\n%#" PRIx64 " [%#x]: event: %d\n",
	       file_offset, event->header.size, event->header.type);

	trace_event(event);
	if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
		evlist->trace_event_sample_raw(evlist, event, sample);

	if (sample)
		perf_evlist__print_tstamp(evlist, event, sample);

	printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
	       event->header.size, perf_event__name(event->header.type));
}

static void dump_sample(struct evsel *evsel, union perf_event *event,
			struct perf_sample *sample)
{
	u64 sample_type;

	if (!dump_trace)
		return;

	printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
	       event->header.misc, sample->pid, sample->tid, sample->ip,
	       sample->period, sample->addr);

	sample_type = evsel->core.attr.sample_type;

	if (evsel__has_callchain(evsel))
		callchain__printf(evsel, sample);

	if (evsel__has_br_stack(evsel))
		branch_stack__printf(sample, evsel__has_branch_callstack(evsel));

	if (sample_type & PERF_SAMPLE_REGS_USER)
		regs_user__printf(sample);

	if (sample_type & PERF_SAMPLE_REGS_INTR)
		regs_intr__printf(sample);

	if (sample_type & PERF_SAMPLE_STACK_USER)
		stack_user__printf(&sample->user_stack);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		printf("... weight: %" PRIu64 "\n", sample->weight);

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);

	if (sample_type & PERF_SAMPLE_PHYS_ADDR)
		printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);

	if (sample_type & PERF_SAMPLE_TRANSACTION)
		printf("... transaction: %" PRIx64 "\n", sample->transaction);

	if (sample_type & PERF_SAMPLE_READ)
		sample_read__printf(sample, evsel->core.attr.read_format);
}

static void dump_read(struct evsel *evsel, union perf_event *event)
{
	struct perf_record_read *read_event = &event->read;
	u64 read_format;

	if (!dump_trace)
		return;

	printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
	       evsel__name(evsel), event->read.value);

	if (!evsel)
		return;

	read_format = evsel->core.attr.read_format;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		printf("... time running : %" PRI_lu64 "\n", read_event->time_running);

	if (read_format & PERF_FORMAT_ID)
		printf("... id : %" PRI_lu64 "\n", read_event->id);
}

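/*
 * Resolve which machine an event belongs to: for guest samples, the guest
 * machine matching the event's pid (falling back to the default guest
 * machine); for everything else, the host.
 */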
static struct machine *machines__find_for_cpumode(struct machines *machines,
						  union perf_event *event,
						  struct perf_sample *sample)
{
	struct machine *machine;

	if (perf_guest &&
	    ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
	     (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
		u32 pid;

		if (event->header.type == PERF_RECORD_MMAP
		    || event->header.type == PERF_RECORD_MMAP2)
			pid = event->mmap.pid;
		else
			pid = sample->pid;

		machine = machines__find(machines, pid);
		if (!machine)
			machine = machines__findnew(machines, DEFAULT_GUEST_KERNEL_ID);
		return machine;
	}

	return &machines->host;
}

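/*
 * With PERF_SAMPLE_READ, the period is not carried in the sample itself:
 * it is derived as the delta between the counter value just read and the
 * last value seen for this sample id.
 */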
static int deliver_sample_value(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct sample_read_value *v,
				struct machine *machine)
{
	struct perf_sample_id *sid = perf_evlist__id2sid(evlist, v->id);
	struct evsel *evsel;

	if (sid) {
		sample->id = v->id;
		sample->period = v->value - sid->period;
		sid->period = v->value;
	}

	if (!sid || sid->evsel == NULL) {
		++evlist->stats.nr_unknown_id;
		return 0;
	}

	/*
	 * There's no reason to deliver sample
	 * for zero period, bail out.
	 */
	if (!sample->period)
		return 0;

	evsel = container_of(sid->evsel, struct evsel, core);
	return tool->sample(tool, event, sample, evsel, machine);
}

static int deliver_sample_group(struct evlist *evlist,
				struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct machine *machine)
{
	int ret = -EINVAL;
	u64 i;

	for (i = 0; i < sample->read.group.nr; i++) {
		ret = deliver_sample_value(evlist, tool, event, sample,
					   &sample->read.group.values[i],
					   machine);
		if (ret)
			break;
	}

	return ret;
}

static int
perf_evlist__deliver_sample(struct evlist *evlist,
			    struct perf_tool *tool,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct evsel *evsel,
			    struct machine *machine)
{
	/* We know evsel != NULL. */
	u64 sample_type = evsel->core.attr.sample_type;
	u64 read_format = evsel->core.attr.read_format;

	/* Standard sample delivery. */
	if (!(sample_type & PERF_SAMPLE_READ))
		return tool->sample(tool, event, sample, evsel, machine);

	/* For PERF_SAMPLE_READ we have either single or group mode. */
	if (read_format & PERF_FORMAT_GROUP)
		return deliver_sample_group(evlist, tool, event, sample,
					    machine);
	else
		return deliver_sample_value(evlist, tool, event, sample,
					    &sample->read.one, machine);
}

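/*
 * Central demultiplexer for kernel events: account the relevant statistics
 * and hand the event to the tool callback matching its header type.
 */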
static int machines__deliver_event(struct machines *machines,
				   struct evlist *evlist,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct perf_tool *tool, u64 file_offset)
{
	struct evsel *evsel;
	struct machine *machine;

	dump_event(evlist, event, file_offset, sample);

	evsel = perf_evlist__id2evsel(evlist, sample->id);

	machine = machines__find_for_cpumode(machines, event, sample);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		if (evsel == NULL) {
			++evlist->stats.nr_unknown_id;
			return 0;
		}
		dump_sample(evsel, event, sample);
		if (machine == NULL) {
			++evlist->stats.nr_unprocessable_samples;
			return 0;
		}
		return perf_evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
	case PERF_RECORD_MMAP:
		return tool->mmap(tool, event, sample, machine);
	case PERF_RECORD_MMAP2:
		if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
			++evlist->stats.nr_proc_map_timeout;
		return tool->mmap2(tool, event, sample, machine);
	case PERF_RECORD_COMM:
		return tool->comm(tool, event, sample, machine);
	case PERF_RECORD_NAMESPACES:
		return tool->namespaces(tool, event, sample, machine);
	case PERF_RECORD_CGROUP:
		return tool->cgroup(tool, event, sample, machine);
	case PERF_RECORD_FORK:
		return tool->fork(tool, event, sample, machine);
	case PERF_RECORD_EXIT:
		return tool->exit(tool, event, sample, machine);
	case PERF_RECORD_LOST:
		if (tool->lost == perf_event__process_lost)
			evlist->stats.total_lost += event->lost.lost;
		return tool->lost(tool, event, sample, machine);
	case PERF_RECORD_LOST_SAMPLES:
		if (tool->lost_samples == perf_event__process_lost_samples)
			evlist->stats.total_lost_samples += event->lost_samples.lost;
		return tool->lost_samples(tool, event, sample, machine);
	case PERF_RECORD_READ:
		dump_read(evsel, event);
		return tool->read(tool, event, sample, evsel, machine);
	case PERF_RECORD_THROTTLE:
		return tool->throttle(tool, event, sample, machine);
	case PERF_RECORD_UNTHROTTLE:
		return tool->unthrottle(tool, event, sample, machine);
	case PERF_RECORD_AUX:
		if (tool->aux == perf_event__process_aux) {
			if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
				evlist->stats.total_aux_lost += 1;
			if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
				evlist->stats.total_aux_partial += 1;
		}
		return tool->aux(tool, event, sample, machine);
	case PERF_RECORD_ITRACE_START:
		return tool->itrace_start(tool, event, sample, machine);
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		return tool->context_switch(tool, event, sample, machine);
	case PERF_RECORD_KSYMBOL:
		return tool->ksymbol(tool, event, sample, machine);
	case PERF_RECORD_BPF_EVENT:
		return tool->bpf(tool, event, sample, machine);
	default:
		++evlist->stats.nr_unknown_events;
		return -1;
	}
}

static int perf_session__deliver_event(struct perf_session *session,
				       union perf_event *event,
				       struct perf_tool *tool,
				       u64 file_offset)
{
	struct perf_sample sample;
	int ret;

	ret = perf_evlist__parse_sample(session->evlist, event, &sample);
	if (ret) {
		pr_err("Can't parse sample, err = %d\n", ret);
		return ret;
	}

	ret = auxtrace__process_event(session, event, &sample, tool);
	if (ret < 0)
		return ret;
	if (ret > 0)
		return 0;

	ret = machines__deliver_event(&session->machines, session->evlist,
				      event, &sample, tool, file_offset);

	if (dump_trace && sample.aux_sample.size)
		auxtrace__dump_auxtrace_sample(session, &sample);

	return ret;
}

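/*
 * User/synthetic events (attrs, build ids, auxtrace, stat data, ...) are
 * not subject to timestamp ordering and are processed right away.
 */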
static s64 perf_session__process_user_event(struct perf_session *session,
					    union perf_event *event,
					    u64 file_offset)
{
	struct ordered_events *oe = &session->ordered_events;
	struct perf_tool *tool = session->tool;
	struct perf_sample sample = { .time = 0, };
	int fd = perf_data__fd(session->data);
	int err;

	if (event->header.type != PERF_RECORD_COMPRESSED ||
	    tool->compressed == perf_session__process_compressed_event_stub)
		dump_event(session->evlist, event, file_offset, &sample);

	/* These events are processed right away */
	switch (event->header.type) {
	case PERF_RECORD_HEADER_ATTR:
		err = tool->attr(tool, event, &session->evlist);
		if (err == 0) {
			perf_session__set_id_hdr_size(session);
			perf_session__set_comm_exec(session);
		}
		return err;
	case PERF_RECORD_EVENT_UPDATE:
		return tool->event_update(tool, event, &session->evlist);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		/*
		 * Deprecated, but we need to handle it for the sake
		 * of old data files created in pipe mode.
		 */
		return 0;
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset, SEEK_SET);
		return tool->tracing_data(session, event);
	case PERF_RECORD_HEADER_BUILD_ID:
		return tool->build_id(session, event);
	case PERF_RECORD_FINISHED_ROUND:
		return tool->finished_round(tool, event, oe);
	case PERF_RECORD_ID_INDEX:
		return tool->id_index(session, event);
	case PERF_RECORD_AUXTRACE_INFO:
		return tool->auxtrace_info(session, event);
	case PERF_RECORD_AUXTRACE:
		/* setup for reading amidst mmap */
		lseek(fd, file_offset + event->header.size, SEEK_SET);
		return tool->auxtrace(session, event);
	case PERF_RECORD_AUXTRACE_ERROR:
		perf_session__auxtrace_error_inc(session, event);
		return tool->auxtrace_error(session, event);
	case PERF_RECORD_THREAD_MAP:
		return tool->thread_map(session, event);
	case PERF_RECORD_CPU_MAP:
		return tool->cpu_map(session, event);
	case PERF_RECORD_STAT_CONFIG:
		return tool->stat_config(session, event);
	case PERF_RECORD_STAT:
		return tool->stat(session, event);
	case PERF_RECORD_STAT_ROUND:
		return tool->stat_round(session, event);
	case PERF_RECORD_TIME_CONV:
		session->time_conv = event->time_conv;
		return tool->time_conv(session, event);
	case PERF_RECORD_HEADER_FEATURE:
		return tool->feature(session, event);
	case PERF_RECORD_COMPRESSED:
		err = tool->compressed(session, event, file_offset);
		if (err)
			dump_event(session->evlist, event, file_offset, &sample);
		return err;
	default:
		return -EINVAL;
	}
}

int perf_session__deliver_synth_event(struct perf_session *session,
				      union perf_event *event,
				      struct perf_sample *sample)
{
	struct evlist *evlist = session->evlist;
	struct perf_tool *tool = session->tool;

	events_stats__inc(&evlist->stats, event->header.type);

	if (event->header.type >= PERF_RECORD_USER_TYPE_START)
		return perf_session__process_user_event(session, event, 0);

	return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0);
}

static void event_swap(union perf_event *event, bool sample_id_all)
{
	perf_event__swap_op swap;

	swap = perf_event__swap_ops[event->header.type];
	if (swap)
		swap(event, sample_id_all);
}

int perf_session__peek_event(struct perf_session *session, off_t file_offset,
			     void *buf, size_t buf_sz,
			     union perf_event **event_ptr,
			     struct perf_sample *sample)
{
	union perf_event *event;
	size_t hdr_sz, rest;
	int fd;

	if (session->one_mmap && !session->header.needs_swap) {
		event = file_offset - session->one_mmap_offset +
			session->one_mmap_addr;
		goto out_parse_sample;
	}

	if (perf_data__is_pipe(session->data))
		return -1;

	fd = perf_data__fd(session->data);
	hdr_sz = sizeof(struct perf_event_header);

	if (buf_sz < hdr_sz)
		return -1;

	if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
	    readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
		return -1;

	event = (union perf_event *)buf;

	if (session->header.needs_swap)
		perf_event_header__bswap(&event->header);

	if (event->header.size < hdr_sz || event->header.size > buf_sz)
		return -1;

	buf += hdr_sz;
	rest = event->header.size - hdr_sz;

	if (readn(fd, buf, rest) != (ssize_t)rest)
		return -1;

	if (session->header.needs_swap)
		event_swap(event, perf_evlist__sample_id_all(session->evlist));

out_parse_sample:

	if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
	    perf_evlist__parse_sample(session->evlist, event, sample))
		return -1;

	*event_ptr = event;

	return 0;
}

int perf_session__peek_events(struct perf_session *session, u64 offset,
			      u64 size, peek_events_cb_t cb, void *data)
{
	u64 max_offset = offset + size;
	char buf[PERF_SAMPLE_MAX_SIZE];
	union perf_event *event;
	int err;

	do {
		err = perf_session__peek_event(session, offset, buf,
					       PERF_SAMPLE_MAX_SIZE, &event,
					       NULL);
		if (err)
			return err;

		err = cb(session, event, offset, data);
		if (err)
			return err;

		offset += event->header.size;
		if (event->header.type == PERF_RECORD_AUXTRACE)
			offset += event->auxtrace.size;

	} while (offset < max_offset);

	return err;
}

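/*
 * Main per-event entry point: byte-swap if needed, account statistics,
 * route user events directly, and either queue the event for
 * timestamp-ordered delivery or deliver it right away.
 */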
1695 static s64 perf_session__process_event(struct perf_session *session,
1696 union perf_event *event, u64 file_offset)
1697 {
1698 struct evlist *evlist = session->evlist;
1699 struct perf_tool *tool = session->tool;
1700 int ret;
1701
1702 if (session->header.needs_swap)
1703 event_swap(event, perf_evlist__sample_id_all(evlist));
1704
1705 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1706 return -EINVAL;
1707
1708 events_stats__inc(&evlist->stats, event->header.type);
1709
1710 if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1711 return perf_session__process_user_event(session, event, file_offset);
1712
1713 if (tool->ordered_events) {
1714 u64 timestamp = -1ULL;
1715
1716 ret = perf_evlist__parse_sample_timestamp(evlist, event, &timestamp);
1717 if (ret && ret != -1)
1718 return ret;
1719
1720 ret = perf_session__queue_event(session, event, timestamp, file_offset);
1721 if (ret != -ETIME)
1722 return ret;
1723 }
1724
1725 return perf_session__deliver_event(session, event, tool, file_offset);
1726 }
1727
1728 void perf_event_header__bswap(struct perf_event_header *hdr)
1729 {
1730 hdr->type = bswap_32(hdr->type);
1731 hdr->misc = bswap_16(hdr->misc);
1732 hdr->size = bswap_16(hdr->size);
1733 }
1734
1735 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1736 {
1737 return machine__findnew_thread(&session->machines.host, -1, pid);
1738 }
1739
1740 /*
1741 * Threads are identified by pid and tid, and the idle task has pid == tid == 0.
1742 * So here a single thread is created for that, but actually there is a separate
1743 * idle task per cpu, so there should be one 'struct thread' per cpu, but there
1744 * is only 1. That causes problems for some tools, requiring workarounds. For
1745 * example get_idle_thread() in builtin-sched.c, or thread_stack__per_cpu().
1746 */
1747 int perf_session__register_idle_thread(struct perf_session *session)
1748 {
1749 struct thread *thread;
1750 int err = 0;
1751
1752 thread = machine__findnew_thread(&session->machines.host, 0, 0);
1753 if (thread == NULL || thread__set_comm(thread, "swapper", 0)) {
1754 pr_err("problem inserting idle task.\n");
1755 err = -1;
1756 }
1757
1758 if (thread == NULL || thread__set_namespaces(thread, 0, NULL)) {
1759 pr_err("problem inserting idle task.\n");
1760 err = -1;
1761 }
1762
1763 /* machine__findnew_thread() got the thread, so put it */
1764 thread__put(thread);
1765 return err;
1766 }
1767
1768 static void
1769 perf_session__warn_order(const struct perf_session *session)
1770 {
1771 const struct ordered_events *oe = &session->ordered_events;
1772 struct evsel *evsel;
1773 bool should_warn = true;
1774
1775 evlist__for_each_entry(session->evlist, evsel) {
1776 if (evsel->core.attr.write_backward)
1777 should_warn = false;
1778 }
1779
1780 if (!should_warn)
1781 return;
1782 if (oe->nr_unordered_events != 0)
1783 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1784 }
1785
1786 static void perf_session__warn_about_errors(const struct perf_session *session)
1787 {
1788 const struct events_stats *stats = &session->evlist->stats;
1789
1790 if (session->tool->lost == perf_event__process_lost &&
1791 stats->nr_events[PERF_RECORD_LOST] != 0) {
1792 ui__warning("Processed %d events and lost %d chunks!\n\n"
1793 "Check IO/CPU overload!\n\n",
1794 stats->nr_events[0],
1795 stats->nr_events[PERF_RECORD_LOST]);
1796 }
1797
1798 if (session->tool->lost_samples == perf_event__process_lost_samples) {
1799 double drop_rate;
1800
1801 drop_rate = (double)stats->total_lost_samples /
1802 (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1803 if (drop_rate > 0.05) {
1804 ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1805 stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1806 drop_rate * 100.0);
1807 }
1808 }
1809
1810 if (session->tool->aux == perf_event__process_aux &&
1811 stats->total_aux_lost != 0) {
1812 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1813 stats->total_aux_lost,
1814 stats->nr_events[PERF_RECORD_AUX]);
1815 }
1816
1817 if (session->tool->aux == perf_event__process_aux &&
1818 stats->total_aux_partial != 0) {
1819 bool vmm_exclusive = false;
1820
1821 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1822 &vmm_exclusive);
1823
1824 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1825 "Are you running a KVM guest in the background?%s\n\n",
1826 stats->total_aux_partial,
1827 stats->nr_events[PERF_RECORD_AUX],
1828 vmm_exclusive ?
1829 "\nReloading kvm_intel module with vmm_exclusive=0\n"
1830 "will reduce the gaps to only guest's timeslices." :
1831 "");
1832 }
1833
1834 if (stats->nr_unknown_events != 0) {
1835 ui__warning("Found %u unknown events!\n\n"
1836 "Is this an older tool processing a perf.data "
1837 "file generated by a more recent tool?\n\n"
1838 "If that is not the case, consider "
1839 "reporting to linux-kernel@vger.kernel.org.\n\n",
1840 stats->nr_unknown_events);
1841 }
1842
1843 if (stats->nr_unknown_id != 0) {
1844 ui__warning("%u samples with id not present in the header\n",
1845 stats->nr_unknown_id);
1846 }
1847
1848 if (stats->nr_invalid_chains != 0) {
1849 ui__warning("Found invalid callchains!\n\n"
1850 "%u out of %u events were discarded for this reason.\n\n"
1851 "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1852 stats->nr_invalid_chains,
1853 stats->nr_events[PERF_RECORD_SAMPLE]);
1854 }
1855
1856 if (stats->nr_unprocessable_samples != 0) {
1857 ui__warning("%u unprocessable samples recorded.\n"
1858 "Do you have a KVM guest running and not using 'perf kvm'?\n",
1859 stats->nr_unprocessable_samples);
1860 }
1861
1862 perf_session__warn_order(session);
1863
1864 events_stats__auxtrace_error_warn(stats);
1865
1866 if (stats->nr_proc_map_timeout != 0) {
1867 ui__warning("%d map information files for pre-existing threads were\n"
1868 "not processed; if there are samples for their addresses,\n"
1869 "those samples will not be resolved. You may find out which\n"
1870 "threads these are by running with -v and redirecting the\n"
1871 "output to a file.\n"
1872 "Is the time limit to process proc maps too short?\n"
1873 "Increase it with --proc-map-timeout.\n",
1874 stats->nr_proc_map_timeout);
1875 }
1876 }
1877
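/*
 * Per-thread callback to flush queued call-stack entries, run for every
 * thread at the end of event processing.
 */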
1878 static int perf_session__flush_thread_stack(struct thread *thread,
1879 void *p __maybe_unused)
1880 {
1881 return thread_stack__flush(thread);
1882 }
1883
1884 static int perf_session__flush_thread_stacks(struct perf_session *session)
1885 {
1886 return machines__for_each_thread(&session->machines,
1887 perf_session__flush_thread_stack,
1888 NULL);
1889 }
1890
1891 volatile int session_done;
1892
1893 static int __perf_session__process_decomp_events(struct perf_session *session);
1894
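/*
 * Process events arriving on a pipe: read each event header, grow the
 * buffer if the event is larger than anything seen so far, read the
 * payload, then deliver the event. A pipe cannot be mmaped, so ordered
 * events are queued with copy-on-queue as the read buffer is reused.
 */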
1895 static int __perf_session__process_pipe_events(struct perf_session *session)
1896 {
1897 struct ordered_events *oe = &session->ordered_events;
1898 struct perf_tool *tool = session->tool;
1899 int fd = perf_data__fd(session->data);
1900 union perf_event *event;
1901 uint32_t size, cur_size = 0;
1902 void *buf = NULL;
1903 s64 skip = 0;
1904 u64 head;
1905 ssize_t err;
1906 void *p;
1907
1908 perf_tool__fill_defaults(tool);
1909
1910 head = 0;
1911 cur_size = sizeof(union perf_event);
1912
1913 buf = malloc(cur_size);
1914 if (!buf)
1915 return -errno;
1916 ordered_events__set_copy_on_queue(oe, true);
1917 more:
1918 event = buf;
1919 err = readn(fd, event, sizeof(struct perf_event_header));
1920 if (err <= 0) {
1921 if (err == 0)
1922 goto done;
1923
1924 pr_err("failed to read event header\n");
1925 goto out_err;
1926 }
1927
1928 if (session->header.needs_swap)
1929 perf_event_header__bswap(&event->header);
1930
1931 size = event->header.size;
1932 if (size < sizeof(struct perf_event_header)) {
1933 pr_err("bad event header size\n");
1934 goto out_err;
1935 }
1936
1937 if (size > cur_size) {
1938 void *new = realloc(buf, size);
1939 if (!new) {
1940 pr_err("failed to allocate memory to read event\n");
1941 goto out_err;
1942 }
1943 buf = new;
1944 cur_size = size;
1945 event = buf;
1946 }
1947 p = event;
1948 p += sizeof(struct perf_event_header);
1949
1950 if (size > sizeof(struct perf_event_header)) {
1951 err = readn(fd, p, size - sizeof(struct perf_event_header));
1952 if (err <= 0) {
1953 if (err == 0) {
1954 pr_err("unexpected end of event stream\n");
1955 goto done;
1956 }
1957
1958 pr_err("failed to read event data\n");
1959 goto out_err;
1960 }
1961 }
1962
1963 if ((skip = perf_session__process_event(session, event, head)) < 0) {
1964 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
1965 head, event->header.size, event->header.type);
1966 err = -EINVAL;
1967 goto out_err;
1968 }
1969
1970 head += size;
1971
1972 if (skip > 0)
1973 head += skip;
1974
1975 err = __perf_session__process_decomp_events(session);
1976 if (err)
1977 goto out_err;
1978
1979 if (!session_done())
1980 goto more;
1981 done:
1982 /* do the final flush for ordered samples */
1983 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
1984 if (err)
1985 goto out_err;
1986 err = auxtrace__flush_events(session, tool);
1987 if (err)
1988 goto out_err;
1989 err = perf_session__flush_thread_stacks(session);
1990 out_err:
1991 free(buf);
1992 if (!tool->no_warn)
1993 perf_session__warn_about_errors(session);
1994 ordered_events__free(&session->ordered_events);
1995 auxtrace__free_events(session);
1996 return err;
1997 }
1998
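/*
 * Return a pointer to the event at @head if it lies entirely within the
 * mapped buffer; NULL if not even the header fits, so the caller should
 * remap further into the file; @error if the header fits but the event
 * spills past the end of the mapping.
 */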
1999 static union perf_event *
2000 prefetch_event(char *buf, u64 head, size_t mmap_size,
2001 bool needs_swap, union perf_event *error)
2002 {
2003 union perf_event *event;
2004
2005 /*
2006 * Ensure we have enough space remaining to read
2007 * the size of the event in the headers.
2008 */
2009 if (head + sizeof(event->header) > mmap_size)
2010 return NULL;
2011
2012 event = (union perf_event *)(buf + head);
2013 if (needs_swap)
2014 perf_event_header__bswap(&event->header);
2015
2016 if (head + event->header.size <= mmap_size)
2017 return event;
2018
2019 /* We're not fetching the event so swap back again */
2020 if (needs_swap)
2021 perf_event_header__bswap(&event->header);
2022
2023 pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
2024 " fuzzed or compressed perf.data?\n", __func__, head, event->header.size, mmap_size);
2025
2026 return error;
2027 }
2028
2029 static union perf_event *
2030 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2031 {
2032 return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2033 }
2034
2035 static union perf_event *
2036 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2037 {
2038 return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2039 }
2040
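/*
 * Deliver every complete event sitting in the latest decompressed
 * buffer, advancing decomp->head so that a later call resumes where
 * this one stopped.
 */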
2041 static int __perf_session__process_decomp_events(struct perf_session *session)
2042 {
2043 s64 skip;
2044 u64 size, file_pos = 0;
2045 struct decomp *decomp = session->decomp_last;
2046
2047 if (!decomp)
2048 return 0;
2049
2050 while (decomp->head < decomp->size && !session_done()) {
2051 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2052 session->header.needs_swap);
2053
2054 if (!event)
2055 break;
2056
2057 size = event->header.size;
2058
2059 if (size < sizeof(struct perf_event_header) ||
2060 (skip = perf_session__process_event(session, event, file_pos)) < 0) {
2061 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2062 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2063 return -EINVAL;
2064 }
2065
2066 if (skip)
2067 size += skip;
2068
2069 decomp->head += size;
2070 }
2071
2072 return 0;
2073 }
2074
2075 /*
2076 * On 64bit we can mmap the data file in one go. No need for tiny mmap
2077 * slices. On 32bit we use 32MB.
2078 */
2079 #if BITS_PER_LONG == 64
2080 #define MMAP_SIZE ULLONG_MAX
2081 #define NUM_MMAPS 1
2082 #else
2083 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2084 #define NUM_MMAPS 128
2085 #endif
2086
2087 struct reader;
2088
2089 typedef s64 (*reader_cb_t)(struct perf_session *session,
2090 union perf_event *event,
2091 u64 file_offset);
2092
2093 struct reader {
2094 int fd;
2095 u64 data_size;
2096 u64 data_offset;
2097 reader_cb_t process;
2098 };
2099
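/*
 * Walk the data section through a sliding mmap window: map a chunk,
 * deliver every event that fits within it, then unmap and remap further
 * into the file once an event crosses the end of the window.
 */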
2100 static int
2101 reader__process_events(struct reader *rd, struct perf_session *session,
2102 struct ui_progress *prog)
2103 {
2104 u64 data_size = rd->data_size;
2105 u64 head, page_offset, file_offset, file_pos, size;
2106 int err = 0, mmap_prot, mmap_flags, map_idx = 0;
2107 size_t mmap_size;
2108 char *buf, *mmaps[NUM_MMAPS];
2109 union perf_event *event;
2110 s64 skip;
2111
2112 page_offset = page_size * (rd->data_offset / page_size);
2113 file_offset = page_offset;
2114 head = rd->data_offset - page_offset;
2115
2116 ui_progress__init_size(prog, data_size, "Processing events...");
2117
2118 data_size += rd->data_offset;
2119
2120 mmap_size = MMAP_SIZE;
2121 if (mmap_size > data_size) {
2122 mmap_size = data_size;
2123 session->one_mmap = true;
2124 }
2125
2126 memset(mmaps, 0, sizeof(mmaps));
2127
2128 mmap_prot = PROT_READ;
2129 mmap_flags = MAP_SHARED;
2130
2131 if (session->header.needs_swap) {
2132 mmap_prot |= PROT_WRITE;
2133 mmap_flags = MAP_PRIVATE;
2134 }
2135 remap:
2136 buf = mmap(NULL, mmap_size, mmap_prot, mmap_flags, rd->fd,
2137 file_offset);
2138 if (buf == MAP_FAILED) {
2139 pr_err("failed to mmap file\n");
2140 err = -errno;
2141 goto out;
2142 }
2143 mmaps[map_idx] = buf;
2144 map_idx = (map_idx + 1) & (ARRAY_SIZE(mmaps) - 1);
2145 file_pos = file_offset + head;
2146 if (session->one_mmap) {
2147 session->one_mmap_addr = buf;
2148 session->one_mmap_offset = file_offset;
2149 }
2150
2151 more:
2152 event = fetch_mmaped_event(head, mmap_size, buf, session->header.needs_swap);
2153 if (IS_ERR(event))
2154 return PTR_ERR(event);
2155
2156 if (!event) {
2157 if (mmaps[map_idx]) {
2158 munmap(mmaps[map_idx], mmap_size);
2159 mmaps[map_idx] = NULL;
2160 }
2161
2162 page_offset = page_size * (head / page_size);
2163 file_offset += page_offset;
2164 head -= page_offset;
2165 goto remap;
2166 }
2167
2168 size = event->header.size;
2169
2170 skip = -EINVAL;
2171
2172 if (size < sizeof(struct perf_event_header) ||
2173 (skip = rd->process(session, event, file_pos)) < 0) {
2174 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2175 file_offset + head, event->header.size,
2176 event->header.type, strerror(-skip));
2177 err = skip;
2178 goto out;
2179 }
2180
2181 if (skip)
2182 size += skip;
2183
2184 head += size;
2185 file_pos += size;
2186
2187 err = __perf_session__process_decomp_events(session);
2188 if (err)
2189 goto out;
2190
2191 ui_progress__update(prog, size);
2192
2193 if (session_done())
2194 goto out;
2195
2196 if (file_pos < data_size)
2197 goto more;
2198
2199 out:
2200 return err;
2201 }
2202
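/* Default reader callback: deliver each event straight to the session. */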
2203 static s64 process_simple(struct perf_session *session,
2204 union perf_event *event,
2205 u64 file_offset)
2206 {
2207 return perf_session__process_event(session, event, file_offset);
2208 }
2209
2210 static int __perf_session__process_events(struct perf_session *session)
2211 {
2212 struct reader rd = {
2213 .fd = perf_data__fd(session->data),
2214 .data_size = session->header.data_size,
2215 .data_offset = session->header.data_offset,
2216 .process = process_simple,
2217 };
2218 struct ordered_events *oe = &session->ordered_events;
2219 struct perf_tool *tool = session->tool;
2220 struct ui_progress prog;
2221 int err;
2222
2223 perf_tool__fill_defaults(tool);
2224
2225 if (rd.data_size == 0)
2226 return -1;
2227
2228 ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2229
2230 err = reader__process_events(&rd, session, &prog);
2231 if (err)
2232 goto out_err;
2233 /* do the final flush for ordered samples */
2234 err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2235 if (err)
2236 goto out_err;
2237 err = auxtrace__flush_events(session, tool);
2238 if (err)
2239 goto out_err;
2240 err = perf_session__flush_thread_stacks(session);
2241 out_err:
2242 ui_progress__finish();
2243 if (!tool->no_warn)
2244 perf_session__warn_about_errors(session);
2245 /*
2246 * We may be switching perf.data output, so make ordered_events
2247 * reusable.
2248 */
2249 ordered_events__reinit(&session->ordered_events);
2250 auxtrace__free_events(session);
2251 session->one_mmap = false;
2252 return err;
2253 }
2254
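/*
 * Top-level entry point: register the idle thread, then process events
 * either from a pipe or from an mmapable perf.data file.
 */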
2255 int perf_session__process_events(struct perf_session *session)
2256 {
2257 if (perf_session__register_idle_thread(session) < 0)
2258 return -ENOMEM;
2259
2260 if (perf_data__is_pipe(session->data))
2261 return __perf_session__process_pipe_events(session);
2262
2263 return __perf_session__process_events(session);
2264 }
2265
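/* Return true if the session's evlist contains a tracepoint event. */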
2266 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2267 {
2268 struct evsel *evsel;
2269
2270 evlist__for_each_entry(session->evlist, evsel) {
2271 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2272 return true;
2273 }
2274
2275 pr_err("No trace samples to read. Did you run 'perf %s'?\n", msg);
2276 return false;
2277 }
2278
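/*
 * Record @symbol_name/@addr as the reference symbol used to detect
 * kernel relocation, trimming the name at the first ']' if present.
 */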
2279 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2280 {
2281 char *bracket;
2282 struct ref_reloc_sym *ref;
2283 struct kmap *kmap;
2284
2285 ref = zalloc(sizeof(struct ref_reloc_sym));
2286 if (ref == NULL)
2287 return -ENOMEM;
2288
2289 ref->name = strdup(symbol_name);
2290 if (ref->name == NULL) {
2291 free(ref);
2292 return -ENOMEM;
2293 }
2294
2295 bracket = strchr(ref->name, ']');
2296 if (bracket)
2297 *bracket = '\0';
2298
2299 ref->addr = addr;
2300
2301 kmap = map__kmap(map);
2302 if (kmap)
2303 kmap->ref_reloc_sym = ref;
2304
2305 return 0;
2306 }
2307
2308 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2309 {
2310 return machines__fprintf_dsos(&session->machines, fp);
2311 }
2312
2313 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2314 bool (skip)(struct dso *dso, int parm), int parm)
2315 {
2316 return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2317 }
2318
2319 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
2320 {
2321 size_t ret;
2322 const char *msg = "";
2323
2324 if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2325 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2326
2327 ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2328
2329 ret += events_stats__fprintf(&session->evlist->stats, fp);
2330 return ret;
2331 }
2332
2333 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2334 {
2335 /*
2336 * FIXME: Here we have to actually print all the machines in this
2337 * session, not just the host...
2338 */
2339 return machine__fprintf(&session->machines.host, fp);
2340 }
2341
2342 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2343 unsigned int type)
2344 {
2345 struct evsel *pos;
2346
2347 evlist__for_each_entry(session->evlist, pos) {
2348 if (pos->core.attr.type == type)
2349 return pos;
2350 }
2351 return NULL;
2352 }
2353
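/*
 * Fill @cpu_bitmap from a -C style cpu list, after checking that every
 * event type in the session sampled the cpu (PERF_SAMPLE_CPU), without
 * which samples cannot be filtered by cpu. Illustrative use only, with
 * a hypothetical caller:
 *
 *	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) < 0)
 *		return -1;
 */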
2354 int perf_session__cpu_bitmap(struct perf_session *session,
2355 const char *cpu_list, unsigned long *cpu_bitmap)
2356 {
2357 int i, err = -1;
2358 struct perf_cpu_map *map;
2359 int nr_cpus = min(session->header.env.nr_cpus_online, MAX_NR_CPUS);
2360
2361 for (i = 0; i < PERF_TYPE_MAX; ++i) {
2362 struct evsel *evsel;
2363
2364 evsel = perf_session__find_first_evtype(session, i);
2365 if (!evsel)
2366 continue;
2367
2368 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2369 pr_err("File does not contain CPU events. "
2370 "Remove the -C option to proceed.\n");
2371 return -1;
2372 }
2373 }
2374
2375 map = perf_cpu_map__new(cpu_list);
2376 if (map == NULL) {
2377 pr_err("Invalid cpu_list\n");
2378 return -1;
2379 }
2380
2381 for (i = 0; i < map->nr; i++) {
2382 int cpu = map->map[i];
2383
2384 if (cpu >= nr_cpus) {
2385 pr_err("Requested CPU %d too large. "
2386 "Consider raising MAX_NR_CPUS\n", cpu);
2387 goto out_delete_map;
2388 }
2389
2390 set_bit(cpu, cpu_bitmap);
2391 }
2392
2393 err = 0;
2394
2395 out_delete_map:
2396 perf_cpu_map__put(map);
2397 return err;
2398 }
2399
2400 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2401 bool full)
2402 {
2403 if (session == NULL || fp == NULL)
2404 return;
2405
2406 fprintf(fp, "# ========\n");
2407 perf_header__fprintf_info(session, fp, full);
2408 fprintf(fp, "# ========\n#\n");
2409 }
2410
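/*
 * Process a PERF_RECORD_ID_INDEX event: copy each entry's idx, cpu and
 * tid into the matching perf_sample_id, after bounds-checking nr
 * against what the event size allows.
 */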
2411 int perf_event__process_id_index(struct perf_session *session,
2412 union perf_event *event)
2413 {
2414 struct evlist *evlist = session->evlist;
2415 struct perf_record_id_index *ie = &event->id_index;
2416 size_t i, nr, max_nr;
2417
2418 max_nr = (ie->header.size - sizeof(struct perf_record_id_index)) /
2419 sizeof(struct id_index_entry);
2420 nr = ie->nr;
2421 if (nr > max_nr)
2422 return -EINVAL;
2423
2424 if (dump_trace)
2425 fprintf(stdout, " nr: %zu\n", nr);
2426
2427 for (i = 0; i < nr; i++) {
2428 struct id_index_entry *e = &ie->entries[i];
2429 struct perf_sample_id *sid;
2430
2431 if (dump_trace) {
2432 fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2433 fprintf(stdout, " idx: %"PRI_lu64, e->idx);
2434 fprintf(stdout, " cpu: %"PRI_ld64, e->cpu);
2435 fprintf(stdout, " tid: %"PRI_ld64"\n", e->tid);
2436 }
2437
2438 sid = perf_evlist__id2sid(evlist, e->id);
2439 if (!sid)
2440 return -ENOENT;
2441 sid->idx = e->idx;
2442 sid->cpu = e->cpu;
2443 sid->tid = e->tid;
2444 }
2445 return 0;
2446 }