#define _FILE_OFFSET_BITS 64

#include <linux/kernel.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>

#include "session.h"
#include "sort.h"
#include "util.h"

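/*
 * Open the file backing this session: stdin when the session reads from a
 * "perf record -o -" pipe, otherwise the perf.data file named in
 * self->filename, on which ownership, size and header sanity checks are
 * performed before any event is read.
 */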
static int perf_session__open(struct perf_session *self, bool force)
{
	struct stat input_stat;

	if (!strcmp(self->filename, "-")) {
		self->fd_pipe = true;
		self->fd = STDIN_FILENO;

		if (perf_header__read(self, self->fd) < 0)
			pr_err("incompatible file format");

		return 0;
	}

	self->fd = open(self->filename, O_RDONLY);
	if (self->fd < 0) {
		int err = errno;

		pr_err("failed to open %s: %s", self->filename, strerror(err));
		if (err == ENOENT && !strcmp(self->filename, "perf.data"))
			pr_err(" (try 'perf record' first)");
		pr_err("\n");
		return -err;
	}

	if (fstat(self->fd, &input_stat) < 0)
		goto out_close;

	if (!force && input_stat.st_uid && (input_stat.st_uid != geteuid())) {
		pr_err("file %s not owned by current user or root\n",
		       self->filename);
		goto out_close;
	}

	if (!input_stat.st_size) {
		pr_info("zero-sized file (%s), nothing to do!\n",
			self->filename);
		goto out_close;
	}

	if (perf_header__read(self, self->fd) < 0) {
		pr_err("incompatible file format");
		goto out_close;
	}

	self->size = input_stat.st_size;
	return 0;

out_close:
	close(self->fd);
	self->fd = -1;
	return -1;
}

void perf_session__update_sample_type(struct perf_session *self)
{
	self->sample_type = perf_header__sample_type(&self->header);
}

int perf_session__create_kernel_maps(struct perf_session *self)
{
	int ret = machine__create_kernel_maps(&self->host_machine);

	if (ret >= 0)
		ret = machines__create_guest_kernel_maps(&self->machines);
	return ret;
}

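/*
 * Allocate and initialize a session.  mode selects between reading an
 * existing perf.data file or pipe (O_RDONLY) and creating one (O_WRONLY).
 * A typical reader, e.g. a perf builtin, would use it roughly like this
 * (sketch only, error handling trimmed; input_name and event_ops stand in
 * for the caller's file name and struct perf_event_ops callbacks):
 *
 *	struct perf_session *session;
 *
 *	session = perf_session__new(input_name, O_RDONLY, force, false);
 *	if (session == NULL)
 *		return -ENOMEM;
 *	err = perf_session__process_events(session, &event_ops);
 *	perf_session__delete(session);
 */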
struct perf_session *perf_session__new(const char *filename, int mode, bool force, bool repipe)
{
	size_t len = filename ? strlen(filename) + 1 : 0;
	struct perf_session *self = zalloc(sizeof(*self) + len);

	if (self == NULL)
		goto out;

	if (perf_header__init(&self->header) < 0)
		goto out_free;

	memcpy(self->filename, filename, len);
	self->threads = RB_ROOT;
	INIT_LIST_HEAD(&self->dead_threads);
	self->hists_tree = RB_ROOT;
	self->last_match = NULL;
	self->mmap_window = 32;
	self->cwd = NULL;
	self->cwdlen = 0;
	self->machines = RB_ROOT;
	self->repipe = repipe;
	INIT_LIST_HEAD(&self->ordered_samples.samples_head);
	machine__init(&self->host_machine, "", HOST_KERNEL_ID);

	if (mode == O_RDONLY) {
		if (perf_session__open(self, force) < 0)
			goto out_delete;
	} else if (mode == O_WRONLY) {
		/*
		 * In O_RDONLY mode this will be performed when reading the
		 * kernel MMAP event, in event__process_mmap().
		 */
		if (perf_session__create_kernel_maps(self) < 0)
			goto out_delete;
	}

	perf_session__update_sample_type(self);
out:
	return self;
out_free:
	free(self);
	return NULL;
out_delete:
	perf_session__delete(self);
	return NULL;
}

void perf_session__delete(struct perf_session *self)
{
	perf_header__exit(&self->header);
	close(self->fd);
	free(self->cwd);
	free(self);
}

void perf_session__remove_thread(struct perf_session *self, struct thread *th)
{
	rb_erase(&th->rb_node, &self->threads);
	/*
	 * We may have references to this thread, for instance in some
	 * hist_entry instances, so just move them to a separate list.
	 */
	list_add_tail(&th->node, &self->dead_threads);
}

static bool symbol__match_parent_regex(struct symbol *sym)
{
	if (sym->name && !regexec(&parent_regex, sym->name, 0, NULL, 0))
		return 1;

	return 0;
}

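/*
 * Walk a sample's callchain and resolve each ip to a map/symbol pair,
 * switching cpumode whenever a PERF_CONTEXT_* marker is seen.  Returns a
 * calloc'ed array with chain->nr entries that the caller must free, or
 * NULL on allocation failure; entries that could not be resolved are left
 * zeroed.  If sort__has_parent is set, *parent is also pointed at the
 * first symbol matching parent_regex.
 */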
struct map_symbol *perf_session__resolve_callchain(struct perf_session *self,
						   struct thread *thread,
						   struct ip_callchain *chain,
						   struct symbol **parent)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	unsigned int i;
	struct map_symbol *syms = calloc(chain->nr, sizeof(*syms));

	if (!syms)
		return NULL;

	for (i = 0; i < chain->nr; i++) {
		u64 ip = chain->ips[i];
		struct addr_location al;

		if (ip >= PERF_CONTEXT_MAX) {
			switch (ip) {
			case PERF_CONTEXT_HV:
				cpumode = PERF_RECORD_MISC_HYPERVISOR;	break;
			case PERF_CONTEXT_KERNEL:
				cpumode = PERF_RECORD_MISC_KERNEL;	break;
			case PERF_CONTEXT_USER:
				cpumode = PERF_RECORD_MISC_USER;	break;
			default:
				break;
			}
			continue;
		}

		al.filtered = false;
		thread__find_addr_location(thread, self, cpumode,
				MAP__FUNCTION, thread->pid, ip, &al, NULL);
		if (al.sym != NULL) {
			if (sort__has_parent && !*parent &&
			    symbol__match_parent_regex(al.sym))
				*parent = al.sym;
			if (!symbol_conf.use_callchain)
				break;
			syms[i].map = al.map;
			syms[i].sym = al.sym;
		}
	}

	return syms;
}

static int process_event_stub(event_t *event __used,
			      struct perf_session *session __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round_stub(event_t *event __used,
				       struct perf_session *session __used,
				       struct perf_event_ops *ops __used)
{
	dump_printf(": unhandled!\n");
	return 0;
}

static int process_finished_round(event_t *event,
				  struct perf_session *session,
				  struct perf_event_ops *ops);

static void perf_event_ops__fill_defaults(struct perf_event_ops *handler)
{
	if (handler->sample == NULL)
		handler->sample = process_event_stub;
	if (handler->mmap == NULL)
		handler->mmap = process_event_stub;
	if (handler->comm == NULL)
		handler->comm = process_event_stub;
	if (handler->fork == NULL)
		handler->fork = process_event_stub;
	if (handler->exit == NULL)
		handler->exit = process_event_stub;
	if (handler->lost == NULL)
		handler->lost = process_event_stub;
	if (handler->read == NULL)
		handler->read = process_event_stub;
	if (handler->throttle == NULL)
		handler->throttle = process_event_stub;
	if (handler->unthrottle == NULL)
		handler->unthrottle = process_event_stub;
	if (handler->attr == NULL)
		handler->attr = process_event_stub;
	if (handler->event_type == NULL)
		handler->event_type = process_event_stub;
	if (handler->tracing_data == NULL)
		handler->tracing_data = process_event_stub;
	if (handler->build_id == NULL)
		handler->build_id = process_event_stub;
	if (handler->finished_round == NULL) {
		if (handler->ordered_samples)
			handler->finished_round = process_finished_round;
		else
			handler->finished_round = process_finished_round_stub;
	}
}

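/*
 * Byte-swapping helpers used when the perf.data file was recorded on a
 * machine with the opposite endianness (self->header.needs_swap): each
 * event type gets a handler in event__swap_ops[] below that fixes up its
 * fields in place before the event is dispatched.
 */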
void mem_bswap_64(void *src, int byte_size)
{
	u64 *m = src;

	while (byte_size > 0) {
		*m = bswap_64(*m);
		byte_size -= sizeof(u64);
		++m;
	}
}

static void event__all64_swap(event_t *self)
{
	struct perf_event_header *hdr = &self->header;
	mem_bswap_64(hdr + 1, self->header.size - sizeof(*hdr));
}

static void event__comm_swap(event_t *self)
{
	self->comm.pid = bswap_32(self->comm.pid);
	self->comm.tid = bswap_32(self->comm.tid);
}

static void event__mmap_swap(event_t *self)
{
	self->mmap.pid = bswap_32(self->mmap.pid);
	self->mmap.tid = bswap_32(self->mmap.tid);
	self->mmap.start = bswap_64(self->mmap.start);
	self->mmap.len = bswap_64(self->mmap.len);
	self->mmap.pgoff = bswap_64(self->mmap.pgoff);
}

static void event__task_swap(event_t *self)
{
	self->fork.pid = bswap_32(self->fork.pid);
	self->fork.tid = bswap_32(self->fork.tid);
	self->fork.ppid = bswap_32(self->fork.ppid);
	self->fork.ptid = bswap_32(self->fork.ptid);
	self->fork.time = bswap_64(self->fork.time);
}

static void event__read_swap(event_t *self)
{
	self->read.pid = bswap_32(self->read.pid);
	self->read.tid = bswap_32(self->read.tid);
	self->read.value = bswap_64(self->read.value);
	self->read.time_enabled = bswap_64(self->read.time_enabled);
	self->read.time_running = bswap_64(self->read.time_running);
	self->read.id = bswap_64(self->read.id);
}

static void event__attr_swap(event_t *self)
{
	size_t size;

	self->attr.attr.type = bswap_32(self->attr.attr.type);
	self->attr.attr.size = bswap_32(self->attr.attr.size);
	self->attr.attr.config = bswap_64(self->attr.attr.config);
	self->attr.attr.sample_period = bswap_64(self->attr.attr.sample_period);
	self->attr.attr.sample_type = bswap_64(self->attr.attr.sample_type);
	self->attr.attr.read_format = bswap_64(self->attr.attr.read_format);
	self->attr.attr.wakeup_events = bswap_32(self->attr.attr.wakeup_events);
	self->attr.attr.bp_type = bswap_32(self->attr.attr.bp_type);
	self->attr.attr.bp_addr = bswap_64(self->attr.attr.bp_addr);
	self->attr.attr.bp_len = bswap_64(self->attr.attr.bp_len);

	size = self->header.size;
	size -= (void *)&self->attr.id - (void *)self;
	mem_bswap_64(self->attr.id, size);
}

static void event__event_type_swap(event_t *self)
{
	self->event_type.event_type.event_id =
		bswap_64(self->event_type.event_type.event_id);
}

static void event__tracing_data_swap(event_t *self)
{
	self->tracing_data.size = bswap_32(self->tracing_data.size);
}

typedef void (*event__swap_op)(event_t *self);

static event__swap_op event__swap_ops[] = {
	[PERF_RECORD_MMAP] = event__mmap_swap,
	[PERF_RECORD_COMM] = event__comm_swap,
	[PERF_RECORD_FORK] = event__task_swap,
	[PERF_RECORD_EXIT] = event__task_swap,
	[PERF_RECORD_LOST] = event__all64_swap,
	[PERF_RECORD_READ] = event__read_swap,
	[PERF_RECORD_SAMPLE] = event__all64_swap,
	[PERF_RECORD_HEADER_ATTR] = event__attr_swap,
	[PERF_RECORD_HEADER_EVENT_TYPE] = event__event_type_swap,
	[PERF_RECORD_HEADER_TRACING_DATA] = event__tracing_data_swap,
	[PERF_RECORD_HEADER_BUILD_ID] = NULL,
	[PERF_RECORD_HEADER_MAX] = NULL,
};

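/*
 * Ordered samples support: when ops->ordered_samples is set, sample events
 * are not handed to ops->sample() immediately but copied into a
 * time-ordered queue, which is flushed up to a safe timestamp each time a
 * PERF_RECORD_FINISHED_ROUND pseudo event is seen (see the comment above
 * process_finished_round() below).
 */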
struct sample_queue {
	u64 timestamp;
	struct sample_event *event;
	struct list_head list;
};

static void flush_sample_queue(struct perf_session *s,
			       struct perf_event_ops *ops)
{
	struct list_head *head = &s->ordered_samples.samples_head;
	u64 limit = s->ordered_samples.next_flush;
	struct sample_queue *tmp, *iter;

	if (!ops->ordered_samples || !limit)
		return;

	list_for_each_entry_safe(iter, tmp, head, list) {
		if (iter->timestamp > limit)
			return;

		if (iter == s->ordered_samples.last_inserted)
			s->ordered_samples.last_inserted = NULL;

		ops->sample((event_t *)iter->event, s);

		s->ordered_samples.last_flush = iter->timestamp;
		list_del(&iter->list);
		free(iter->event);
		free(iter);
	}
}

/*
 * When perf record finishes a pass over every buffer, it records this
 * pseudo event.
 * We record the max timestamp t found in pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in pass n + 1.
 * Hence when we start to read pass n + 2, we can safely flush every
 * event with a timestamp below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7  <--- max recorded
 *
 *      Flush every event below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush every event below timestamp 7
 *      etc...
 */
static int process_finished_round(event_t *event __used,
				  struct perf_session *session,
				  struct perf_event_ops *ops)
{
	flush_sample_queue(session, ops);
	session->ordered_samples.next_flush = session->ordered_samples.max_timestamp;

	return 0;
}

static void __queue_sample_end(struct sample_queue *new, struct list_head *head)
{
	struct sample_queue *iter;

	list_for_each_entry_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_before(struct sample_queue *new,
				  struct sample_queue *iter,
				  struct list_head *head)
{
	list_for_each_entry_continue_reverse(iter, head, list) {
		if (iter->timestamp < new->timestamp) {
			list_add(&new->list, &iter->list);
			return;
		}
	}

	list_add(&new->list, head);
}

static void __queue_sample_after(struct sample_queue *new,
				 struct sample_queue *iter,
				 struct list_head *head)
{
	list_for_each_entry_continue(iter, head, list) {
		if (iter->timestamp > new->timestamp) {
			list_add_tail(&new->list, &iter->list);
			return;
		}
	}
	list_add_tail(&new->list, head);
}

/* The queue is ordered by time */
static void __queue_sample_event(struct sample_queue *new,
				 struct perf_session *s)
{
	struct sample_queue *last_inserted = s->ordered_samples.last_inserted;
	struct list_head *head = &s->ordered_samples.samples_head;

	if (!last_inserted) {
		__queue_sample_end(new, head);
		return;
	}

	/*
	 * Most of the time the current event has a timestamp
	 * very close to the last event inserted, unless we just switched
	 * to another event buffer. Having a sorting based on a list and
	 * on the last inserted event that is close to the current one is
	 * probably more efficient than an rbtree based sorting.
	 */
	if (last_inserted->timestamp >= new->timestamp)
		__queue_sample_before(new, last_inserted, head);
	else
		__queue_sample_after(new, last_inserted, head);
}

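/*
 * Copy a sample event into the time-ordered queue and remember both the
 * insertion point and the highest timestamp seen so far, which becomes the
 * flush limit when the current round finishes.
 */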
static int queue_sample_event(event_t *event, struct sample_data *data,
			      struct perf_session *s)
{
	u64 timestamp = data->time;
	struct sample_queue *new;

	if (timestamp < s->ordered_samples.last_flush) {
		printf("Warning: Timestamp below last timeslice flush\n");
		return -EINVAL;
	}

	new = malloc(sizeof(*new));
	if (!new)
		return -ENOMEM;

	new->timestamp = timestamp;

	new->event = malloc(event->header.size);
	if (!new->event) {
		free(new);
		return -ENOMEM;
	}

	memcpy(new->event, event, event->header.size);

	__queue_sample_event(new, s);
	s->ordered_samples.last_inserted = new;

	if (new->timestamp > s->ordered_samples.max_timestamp)
		s->ordered_samples.max_timestamp = new->timestamp;

	return 0;
}

static int perf_session__process_sample(event_t *event, struct perf_session *s,
					 struct perf_event_ops *ops)
{
	struct sample_data data;

	if (!ops->ordered_samples)
		return ops->sample(event, s);

	bzero(&data, sizeof(struct sample_data));
	event__parse_sample(event, s->sample_type, &data);

	queue_sample_event(event, &data, s);

	return 0;
}

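/*
 * Central event dispatcher: account the event, byte-swap it if the file
 * comes from a machine with different endianness, then hand it to the
 * matching perf_event_ops callback.
 */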
static int perf_session__process_event(struct perf_session *self,
					event_t *event,
					struct perf_event_ops *ops,
					u64 offset, u64 head)
{
	trace_event(event);

	if (event->header.type < PERF_RECORD_HEADER_MAX) {
		dump_printf("%#Lx [%#x]: PERF_RECORD_%s",
			    offset + head, event->header.size,
			    event__name[event->header.type]);
		hists__inc_nr_events(&self->hists, event->header.type);
	}

	if (self->header.needs_swap && event__swap_ops[event->header.type])
		event__swap_ops[event->header.type](event);

	switch (event->header.type) {
	case PERF_RECORD_SAMPLE:
		return perf_session__process_sample(event, self, ops);
	case PERF_RECORD_MMAP:
		return ops->mmap(event, self);
	case PERF_RECORD_COMM:
		return ops->comm(event, self);
	case PERF_RECORD_FORK:
		return ops->fork(event, self);
	case PERF_RECORD_EXIT:
		return ops->exit(event, self);
	case PERF_RECORD_LOST:
		return ops->lost(event, self);
	case PERF_RECORD_READ:
		return ops->read(event, self);
	case PERF_RECORD_THROTTLE:
		return ops->throttle(event, self);
	case PERF_RECORD_UNTHROTTLE:
		return ops->unthrottle(event, self);
	case PERF_RECORD_HEADER_ATTR:
		return ops->attr(event, self);
	case PERF_RECORD_HEADER_EVENT_TYPE:
		return ops->event_type(event, self);
	case PERF_RECORD_HEADER_TRACING_DATA:
		/* setup for reading amidst mmap */
		lseek(self->fd, offset + head, SEEK_SET);
		return ops->tracing_data(event, self);
	case PERF_RECORD_HEADER_BUILD_ID:
		return ops->build_id(event, self);
	case PERF_RECORD_FINISHED_ROUND:
		return ops->finished_round(event, self, ops);
	default:
		++self->hists.stats.nr_unknown_events;
		return -1;
	}
}

void perf_event_header__bswap(struct perf_event_header *self)
{
	self->type = bswap_32(self->type);
	self->misc = bswap_16(self->misc);
	self->size = bswap_16(self->size);
}

static struct thread *perf_session__register_idle_thread(struct perf_session *self)
{
	struct thread *thread = perf_session__findnew(self, 0);

	if (thread == NULL || thread__set_comm(thread, "swapper")) {
		pr_err("problem inserting idle task.\n");
		thread = NULL;
	}

	return thread;
}

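/*
 * read() exactly size bytes, retrying on short reads.  Returns size on
 * success, 0 if end of file is hit first, or read()'s negative return
 * value on error.
 */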
int do_read(int fd, void *buf, size_t size)
{
	void *buf_start = buf;

	while (size) {
		int ret = read(fd, buf, size);

		if (ret <= 0)
			return ret;

		size -= ret;
		buf += ret;
	}

	return buf - buf_start;
}

#define session_done()	(*(volatile int *)(&session_done))
volatile int session_done;

static int __perf_session__process_pipe_events(struct perf_session *self,
					       struct perf_event_ops *ops)
{
	event_t event;
	uint32_t size;
	int skip = 0;
	u64 head;
	int err;
	void *p;

	perf_event_ops__fill_defaults(ops);

	head = 0;
more:
	err = do_read(self->fd, &event, sizeof(struct perf_event_header));
	if (err <= 0) {
		if (err == 0)
			goto done;

		pr_err("failed to read event header\n");
		goto out_err;
	}

	if (self->header.needs_swap)
		perf_event_header__bswap(&event.header);

	size = event.header.size;
	if (size == 0)
		size = 8;

	p = &event;
	p += sizeof(struct perf_event_header);

	if (size - sizeof(struct perf_event_header)) {
		err = do_read(self->fd, p,
			      size - sizeof(struct perf_event_header));
		if (err <= 0) {
			if (err == 0) {
				pr_err("unexpected end of event stream\n");
				goto done;
			}

			pr_err("failed to read event data\n");
			goto out_err;
		}
	}

	if (size == 0 ||
	    (skip = perf_session__process_event(self, &event, ops,
						0, head)) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    head, event.header.size, event.header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    head, event.header.size, event.header.type);

	if (skip > 0)
		head += skip;

	if (!session_done())
		goto more;
done:
	err = 0;
out_err:
	return err;
}

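/*
 * Process an on-disk perf.data file by mmaping it in self->mmap_window
 * page-sized chunks, sliding the window forward as events are consumed.
 * When the file needs byte-swapping the mapping is made private and
 * writable so the events can be fixed up in place.
 */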
int __perf_session__process_events(struct perf_session *self,
				   u64 data_offset, u64 data_size,
				   u64 file_size, struct perf_event_ops *ops)
{
	int err, mmap_prot, mmap_flags;
	u64 head, shift;
	u64 offset = 0;
	size_t page_size;
	event_t *event;
	uint32_t size;
	char *buf;
	struct ui_progress *progress = ui_progress__new("Processing events...",
							self->size);
	if (progress == NULL)
		return -1;

	perf_event_ops__fill_defaults(ops);

	page_size = sysconf(_SC_PAGESIZE);

	head = data_offset;
	shift = page_size * (head / page_size);
	offset += shift;
	head -= shift;

	mmap_prot = PROT_READ;
	mmap_flags = MAP_SHARED;

	if (self->header.needs_swap) {
		mmap_prot |= PROT_WRITE;
		mmap_flags = MAP_PRIVATE;
	}
remap:
	buf = mmap(NULL, page_size * self->mmap_window, mmap_prot,
		   mmap_flags, self->fd, offset);
	if (buf == MAP_FAILED) {
		pr_err("failed to mmap file\n");
		err = -errno;
		goto out_err;
	}

more:
	event = (event_t *)(buf + head);
	ui_progress__update(progress, offset);

	if (self->header.needs_swap)
		perf_event_header__bswap(&event->header);
	size = event->header.size;
	if (size == 0)
		size = 8;

	if (head + event->header.size >= page_size * self->mmap_window) {
		int munmap_ret;

		shift = page_size * (head / page_size);

		munmap_ret = munmap(buf, page_size * self->mmap_window);
		assert(munmap_ret == 0);

		offset += shift;
		head -= shift;
		goto remap;
	}

	size = event->header.size;

	dump_printf("\n%#Lx [%#x]: event: %d\n",
		    offset + head, event->header.size, event->header.type);

	if (size == 0 ||
	    perf_session__process_event(self, event, ops, offset, head) < 0) {
		dump_printf("%#Lx [%#x]: skipping unknown header type: %d\n",
			    offset + head, event->header.size,
			    event->header.type);
		/*
		 * assume we lost track of the stream, check alignment, and
		 * increment a single u64 in the hope to catch on again 'soon'.
		 */
		if (unlikely(head & 7))
			head &= ~7ULL;

		size = 8;
	}

	head += size;

	if (offset + head >= data_offset + data_size)
		goto done;

	if (offset + head < file_size)
		goto more;
done:
	err = 0;
	/* do the final flush for ordered samples */
	self->ordered_samples.next_flush = ULLONG_MAX;
	flush_sample_queue(self, ops);
out_err:
	ui_progress__delete(progress);
	return err;
}

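/*
 * Top level entry point used by the perf builtins: register the idle
 * thread, record the current working directory (unless
 * symbol_conf.full_paths is set) and then process the events from either
 * the mmaped file or the pipe.
 */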
int perf_session__process_events(struct perf_session *self,
				 struct perf_event_ops *ops)
{
	int err;

	if (perf_session__register_idle_thread(self) == NULL)
		return -ENOMEM;

	if (!symbol_conf.full_paths) {
		char bf[PATH_MAX];

		if (getcwd(bf, sizeof(bf)) == NULL) {
			err = -errno;
out_getcwd_err:
			pr_err("failed to get the current directory\n");
			goto out_err;
		}
		self->cwd = strdup(bf);
		if (self->cwd == NULL) {
			err = -ENOMEM;
			goto out_getcwd_err;
		}
		self->cwdlen = strlen(self->cwd);
	}

	if (!self->fd_pipe)
		err = __perf_session__process_events(self,
						     self->header.data_offset,
						     self->header.data_size,
						     self->size, ops);
	else
		err = __perf_session__process_pipe_events(self, ops);
out_err:
	return err;
}

bool perf_session__has_traces(struct perf_session *self, const char *msg)
{
	if (!(self->sample_type & PERF_SAMPLE_RAW)) {
		pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
		return false;
	}

	return true;
}

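/*
 * Record the reference relocation symbol (its name, truncated at the first
 * ']' if present, and its address) on the kmap of every map type, so that
 * kernel maps can later be relocated when their symbols are loaded.
 */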
int perf_session__set_kallsyms_ref_reloc_sym(struct map **maps,
					     const char *symbol_name,
					     u64 addr)
{
	char *bracket;
	enum map_type i;
	struct ref_reloc_sym *ref;

	ref = zalloc(sizeof(struct ref_reloc_sym));
	if (ref == NULL)
		return -ENOMEM;

	ref->name = strdup(symbol_name);
	if (ref->name == NULL) {
		free(ref);
		return -ENOMEM;
	}

	bracket = strchr(ref->name, ']');
	if (bracket)
		*bracket = '\0';

	ref->addr = addr;

	for (i = 0; i < MAP__NR_TYPES; ++i) {
		struct kmap *kmap = map__kmap(maps[i]);
		kmap->ref_reloc_sym = ref;
	}

	return 0;
}

size_t perf_session__fprintf_dsos(struct perf_session *self, FILE *fp)
{
	return __dsos__fprintf(&self->host_machine.kernel_dsos, fp) +
	       __dsos__fprintf(&self->host_machine.user_dsos, fp) +
	       machines__fprintf_dsos(&self->machines, fp);
}

size_t perf_session__fprintf_dsos_buildid(struct perf_session *self, FILE *fp,
					  bool with_hits)
{
	size_t ret = machine__fprintf_dsos_buildid(&self->host_machine, fp, with_hits);
	return ret + machines__fprintf_dsos_buildid(&self->machines, fp, with_hits);
}