]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/nat/linux-btrace.c
Remove ioctl-based procfs support on Solaris
[thirdparty/binutils-gdb.git] / gdb / nat / linux-btrace.c
CommitLineData
7c97f91e
MM
1/* Linux-dependent part of branch trace support for GDB, and GDBserver.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
7c97f91e
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
53f81362 22#include "common-defs.h"
7c97f91e 23#include "linux-btrace.h"
361c8ade 24#include "common-regcache.h"
be8b1ea6 25#include "gdb_wait.h"
df7e5265 26#include "x86-cpuid.h"
0568462b
MM
27#include "filestuff.h"
28
29#include <inttypes.h>
7c97f91e 30
5b4e221c 31#include <sys/syscall.h>
5b4e221c
MF
32
33#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
7c97f91e 34#include <unistd.h>
7c97f91e
MM
35#include <sys/mman.h>
36#include <sys/user.h>
5826e159 37#include "nat/gdb_ptrace.h"
a950d57c 38#include <sys/types.h>
a950d57c 39#include <signal.h>
7c97f91e
MM
40
/* A branch trace record in perf_event.

   This mirrors the per-sample payload we configure in linux_enable_bts
   (PERF_SAMPLE_IP | PERF_SAMPLE_ADDR) — TODO confirm field order against
   the kernel's BTS sample layout.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
50
/* A perf_event branch trace sample: the generic perf_event record header
   followed immediately by the BTS payload.  perf_event_sample_ok checks
   that records read from the ring buffer match this exact size.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
60
afb778a2
MM
/* Identify the cpu we're running on.

   Returns a zeroed btrace_cpu if CPUID is unavailable or the vendor is
   not recognized; fills in vendor/family/model for Intel CPUs.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  /* CPUID leaf 0: vendor identification string in EBX/EDX/ECX.  */
  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
	  && edx == signature_INTEL_edx)
	{
	  unsigned int cpuid, ignore;

	  /* CPUID leaf 1: family/model/stepping in EAX.  */
	  ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
	  if (ok != 0)
	    {
	      cpu.vendor = CV_INTEL;

	      cpu.family = (cpuid >> 8) & 0xf;
	      cpu.model = (cpuid >> 4) & 0xf;

	      /* For family 6, the extended model bits (19:16) are
		 significant and folded into the model number.
		 NOTE(review): the extended family (family 0xf) is not
		 handled here — presumably not needed by the callers.  */
	      if (cpu.family == 0x6)
		cpu.model += (cpuid >> 12) & 0xf0;
	    }
	}
    }

  return cpu;
}
95
aadf7753 96/* Return non-zero if there is new data in PEVENT; zero otherwise. */
7c97f91e 97
aadf7753
MM
98static int
99perf_event_new_data (const struct perf_event_buffer *pev)
7c97f91e 100{
aadf7753 101 return *pev->data_head != pev->last_head;
7c97f91e
MM
102}
103
b20a6524
MM
/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
		 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  /* Map the logical [data_tail, data_head) range onto the circular
     buffer.  */
  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      /* The range wraps: copy the tail segment first, then the part that
	 wrapped around to the beginning of the buffer.  */
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
156
157/* Copy the perf event buffer data from PEV.
158 Store a pointer to the copy into DATA and its size in SIZE. */
159
160static void
161perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
e7b01ce0 162 size_t *psize)
b20a6524 163{
e7b01ce0
MM
164 size_t size;
165 __u64 data_head;
b20a6524
MM
166
167 data_head = *pev->data_head;
b20a6524 168 size = pev->size;
b20a6524
MM
169
170 *data = perf_event_read (pev, data_head, size);
171 *psize = size;
172
173 pev->last_head = data_head;
174}
175
/* Determine the Intel PT perf event type from sysfs.
   Returns zero on success and fills in TYPE; returns -1 otherwise.  */

static int
perf_event_pt_event_type (int *type)
{
  FILE *file
    = fopen ("/sys/bus/event_source/devices/intel_pt/type", "r");
  int found;

  if (file == NULL)
    return -1;

  found = fscanf (file, "%d", type);
  fclose (file);

  return (found == 1) ? 0 : -1;
}
197
0568462b
MM
/* Try to determine the start address of the Linux kernel.

   Scans /proc/kallsyms for the "_text" symbol.  The result (possibly zero
   if it could not be determined) is cached after the first call.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  /* Mark the lookup done up front so a failed scan is not retried.  */
  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
	break;

      /* Parse "<hex address> <t|T> <symbol>"; only text symbols are of
	 interest.  %7s bounds the write into SYMBOL.  */
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
	continue;

      if (strcmp (symbol, "_text") == 0)
	{
	  kernel_start = addr;
	  break;
	}
    }

  return kernel_start;
}
238
7c97f91e
MM
/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start = linux_determine_kernel_start ();

  if (kernel_start != 0ull)
    return addr >= kernel_start;

  /* Without a known kernel start address, fall back to testing the most
     significant bit.  This works at least for 64-bit kernels.  */
  return (addr >> 63) != 0;
}
254
255/* Check whether a perf event record should be skipped. */
256
257static inline int
0568462b 258perf_event_skip_bts_record (const struct perf_event_bts *bts)
7c97f91e
MM
259{
260 /* The hardware may report branches from kernel into user space. Branches
261 from user into kernel space will be suppressed. We filter the former to
262 provide a consistent branch trace excluding kernel. */
0568462b 263 return perf_event_is_kernel_addr (bts->from);
7c97f91e
MM
264}
265
266/* Perform a few consistency checks on a perf event sample record. This is
267 meant to catch cases when we get out of sync with the perf event stream. */
268
269static inline int
270perf_event_sample_ok (const struct perf_event_sample *sample)
271{
272 if (sample->header.type != PERF_RECORD_SAMPLE)
273 return 0;
274
275 if (sample->header.size != sizeof (*sample))
276 return 0;
277
278 return 1;
279}
280
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.

   TINFO identifies the traced thread (used to read its current PC); SIZE is
   the number of payload bytes to consume.  Returns a newly allocated vector
   of blocks; the caller owns it.  */

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
		     const uint8_t *end, const uint8_t *start, size_t size)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
	psample = (const struct perf_event_sample *) start;
      else
	{
	  int missing;

	  /* We're to the left of the ring buffer, we will wrap around and
	     reappear at the very right of the ring buffer.  */

	  missing = (begin - start);
	  start = (end - missing);

	  /* If the entire sample is missing, we're done.  */
	  if (missing == sizeof (sample))
	    psample = (const struct perf_event_sample *) start;
	  else
	    {
	      uint8_t *stack;

	      /* The sample wrapped around.  The lower part is at the end and
		 the upper part is at the beginning of the buffer.  */
	      stack = (uint8_t *) &sample;

	      /* Copy the two parts so we have a contiguous sample.  */
	      memcpy (stack, start, missing);
	      memcpy (stack + missing, begin, sizeof (sample) - missing);

	      psample = &sample;
	    }
	}

      /* A malformed record means we lost sync with the stream; stop rather
	 than produce garbage blocks.  */
      if (!perf_event_sample_ok (psample))
	{
	  warning (_("Branch trace may be incomplete."));
	  break;
	}

      /* Drop kernel-originated branches (see perf_event_skip_bts_record).  */
      if (perf_event_skip_bts_record (&psample->bts))
	continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  VEC_safe_push (btrace_block_s, btrace, &block);

  return btrace;
}
381
/* Check whether the kernel supports BTS.

   Probes by forking a stopped child and attempting a perf_event_open for
   hardware branch sampling on it.  Returns non-zero if the syscall
   succeeds.  The child never runs user code: it traces itself, raises
   SIGTRAP, and is killed by the parent afterwards.  */

static int
kernel_supports_bts (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test bts: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      /* Child: arrange to stop under ptrace, then stop.  */
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
	{
	  warning (_("test bts: cannot PTRACE_TRACEME: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      status = raise (SIGTRAP);
      if (status != 0)
	{
	  warning (_("test bts: cannot raise SIGTRAP: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      _exit (1);

    default:
      /* Parent: wait for the child to stop on its SIGTRAP.  */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test bts: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  return 0;
	}

      if (!WIFSTOPPED (status))
	{
	  warning (_("test bts: expected stop. status: %d."),
		   status);
	  return 0;
	}

      /* Try to open a BTS-style branch sampling event on the stopped
	 child; success is the support indicator.  */
      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
	close (file);

      /* Tear the child down; PTRACE_KILL is a belt-and-braces addition
	 to the SIGKILL.  */
      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test bts: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  if (!WIFSIGNALED (status))
	    warning (_("test bts: expected killed. status: %d."),
		     status);
	}

      return (file >= 0);
    }
}
464
/* Check whether the kernel supports Intel Processor Trace.

   Same probing strategy as kernel_supports_bts: fork a stopped child and
   attempt a perf_event_open with the intel_pt event type (looked up via
   sysfs).  Returns non-zero if the syscall succeeds.  */

static int
kernel_supports_pt (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file, type;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test pt: cannot fork: %s."), safe_strerror (errno));
      return 0;

    case 0:
      /* Child: arrange to stop under ptrace, then stop.  */
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
	{
	  warning (_("test pt: cannot PTRACE_TRACEME: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      status = raise (SIGTRAP);
      if (status != 0)
	{
	  warning (_("test pt: cannot raise SIGTRAP: %s."),
		   safe_strerror (errno));
	  _exit (1);
	}

      _exit (1);

    default:
      /* Parent: wait for the child to stop on its SIGTRAP.  */
      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test pt: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  return 0;
	}

      if (!WIFSTOPPED (status))
	{
	  warning (_("test pt: expected stop. status: %d."),
		   status);
	  return 0;
	}

      /* No intel_pt sysfs entry means no PT support.  */
      status = perf_event_pt_event_type (&type);
      if (status != 0)
	file = -1;
      else
	{
	  memset (&attr, 0, sizeof (attr));

	  attr.size = sizeof (attr);
	  attr.type = type;
	  attr.exclude_kernel = 1;
	  attr.exclude_hv = 1;
	  attr.exclude_idle = 1;

	  file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
	  if (file >= 0)
	    close (file);
	}

      /* Tear the child down.  */
      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
	{
	  warning (_("test pt: bad pid %ld, error: %s."),
		   (long) pid, safe_strerror (errno));
	  if (!WIFSIGNALED (status))
	    warning (_("test pt: expected killed. status: %d."),
		     status);
	}

      return (file >= 0);
    }
}
551
043c3577 552/* Check whether an Intel cpu supports BTS. */
a950d57c
MM
553
554static int
afb778a2 555intel_supports_bts (const struct btrace_cpu *cpu)
a950d57c 556{
afb778a2 557 switch (cpu->family)
5f8e0b8f
MF
558 {
559 case 0x6:
afb778a2 560 switch (cpu->model)
5f8e0b8f
MF
561 {
562 case 0x1a: /* Nehalem */
563 case 0x1f:
564 case 0x1e:
565 case 0x2e:
566 case 0x25: /* Westmere */
567 case 0x2c:
568 case 0x2f:
569 case 0x2a: /* Sandy Bridge */
570 case 0x2d:
571 case 0x3a: /* Ivy Bridge */
572
573 /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
574 "from" information afer an EIST transition, T-states, C1E, or
575 Adaptive Thermal Throttling. */
576 return 0;
577 }
578 }
a950d57c
MM
579
580 return 1;
a950d57c
MM
581}
582
043c3577 583/* Check whether the cpu supports BTS. */
a950d57c
MM
584
585static int
043c3577 586cpu_supports_bts (void)
a950d57c 587{
afb778a2 588 struct btrace_cpu cpu;
a950d57c 589
afb778a2
MM
590 cpu = btrace_this_cpu ();
591 switch (cpu.vendor)
592 {
593 default:
594 /* Don't know about others. Let's assume they do. */
595 return 1;
a950d57c 596
afb778a2
MM
597 case CV_INTEL:
598 return intel_supports_bts (&cpu);
599 }
a950d57c
MM
600}
601
/* Check whether the linux target supports BTS.  The (expensive) probe is
   run once and the verdict cached: 1 = supported, -1 = not supported.  */

static int
linux_supports_bts (void)
{
  static int cached;

  if (cached == 0)
    cached = (kernel_supports_bts () && cpu_supports_bts ()) ? 1 : -1;

  return cached > 0;
}
621
/* Check whether the linux target supports Intel Processor Trace.  The
   probe result is cached: 1 = supported, -1 = not supported.  */

static int
linux_supports_pt (void)
{
  static int cached;

  if (cached == 0)
    cached = kernel_supports_pt () ? 1 : -1;

  return cached > 0;
}
639
7c97f91e
MM
640/* See linux-btrace.h. */
641
043c3577
MM
642int
643linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
644{
645 switch (format)
646 {
647 case BTRACE_FORMAT_NONE:
648 return 0;
649
650 case BTRACE_FORMAT_BTS:
651 return linux_supports_bts ();
b20a6524
MM
652
653 case BTRACE_FORMAT_PT:
654 return linux_supports_pt ();
043c3577
MM
655 }
656
657 internal_error (__FILE__, __LINE__, _("Unknown branch trace format"));
658}
659
/* Enable branch tracing in BTS format.

   PTID identifies the thread to trace; CONF carries the requested buffer
   size in bytes.  Returns a newly allocated btrace_target_info on success
   (caller frees via linux_disable_btrace) or NULL on failure.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  /* Prefer the lwp id; fall back to the pid for the main thread.  */
  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  bts->file = syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0);
  if (bts->file < 0)
    goto err_out;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
	   + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
	continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
	continue;

      /* The number of pages we request needs to be a power of two.  The
	 mapping covers one extra header page before the data.  */
      header = ((struct perf_event_mmap_page *)
		mmap (NULL, length, PROT_READ, MAP_SHARED, bts->file, 0));
      if (header != MAP_FAILED)
	break;
    }

  if (pages == 0)
    goto err_file;

  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  /* Newer kernels report the actual data offset/size in the header; use
     them when the header is large enough to contain those fields.  */
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
	{
	  munmap ((void *) header, size + PAGE_SIZE);
	  goto err_file;
	}
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->header = header;
  bts->bts.mem = ((const uint8_t *) header) + data_offset;
  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.last_head = 0ull;

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo;

 err_file:
  /* We were not able to allocate any buffer.  */
  close (bts->file);

 err_out:
  xfree (tinfo);
  return NULL;
}
780
781#if defined (PERF_ATTR_SIZE_VER5)
782
/* Enable branch tracing in Intel Processor Trace format.

   PTID identifies the thread to trace; CONF carries the requested aux
   buffer size in bytes (zero disables).  Returns a newly allocated
   btrace_target_info on success or NULL on failure.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct perf_event_mmap_page *header;
  struct btrace_target_info *tinfo;
  struct btrace_tinfo_pt *pt;
  size_t pages, size;
  int pid, pg, errcode, type;

  if (conf->size == 0)
    return NULL;

  /* The intel_pt event type comes from sysfs.  */
  errcode = perf_event_pt_event_type (&type);
  if (errcode != 0)
    return NULL;

  /* Prefer the lwp id; fall back to the pid for the main thread.  */
  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  tinfo = XCNEW (struct btrace_target_info);
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = type;

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  pt->file = syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0);
  if (pt->file < 0)
    goto err;

  /* Allocate the configuration page.  It must be writable so we can set
     the aux buffer's offset and size below.  */
  header = ((struct perf_event_mmap_page *)
	    mmap (NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		  pt->file, 0));
  if (header == MAP_FAILED)
    goto err_file;

  /* Place the aux buffer right after the data buffer.  */
  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
	   + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
	continue;

      size = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
	continue;

      header->aux_size = data_size;
      length = size;

      pt->pt.mem = ((const uint8_t *)
		    mmap (NULL, length, PROT_READ, MAP_SHARED, pt->file,
			  header->aux_offset));
      if (pt->pt.mem != MAP_FAILED)
	break;
    }

  if (pages == 0)
    goto err_conf;

  pt->header = header;
  pt->pt.size = size;
  pt->pt.data_head = &header->aux_head;

  tinfo->conf.pt.size = (unsigned int) size;
  return tinfo;

 err_conf:
  munmap((void *) header, PAGE_SIZE);

 err_file:
  close (pt->file);

 err:
  xfree (tinfo);
  return NULL;
}
894
b20a6524
MM
895#else /* !defined (PERF_ATTR_SIZE_VER5) */
896
897static struct btrace_target_info *
898linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
899{
900 errno = EOPNOTSUPP;
901 return NULL;
902}
903
904#endif /* !defined (PERF_ATTR_SIZE_VER5) */
905
7c97f91e
MM
906/* See linux-btrace.h. */
907
f4abbc16
MM
908struct btrace_target_info *
909linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
910{
911 struct btrace_target_info *tinfo;
912
913 tinfo = NULL;
914 switch (conf->format)
915 {
916 case BTRACE_FORMAT_NONE:
917 break;
918
919 case BTRACE_FORMAT_BTS:
d33501a5 920 tinfo = linux_enable_bts (ptid, &conf->bts);
f4abbc16 921 break;
b20a6524
MM
922
923 case BTRACE_FORMAT_PT:
924 tinfo = linux_enable_pt (ptid, &conf->pt);
925 break;
f4abbc16
MM
926 }
927
928 return tinfo;
929}
930
/* Disable BTS tracing.  Unmaps the header+data buffer and closes the perf
   event file descriptor.  Always succeeds.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  /* The mapping covers the data buffer plus one header page.  */
  munmap((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
941
/* Disable Intel Processor Trace tracing.  Unmaps the aux buffer and the
   configuration page, then closes the perf event file descriptor.
   Always succeeds.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}
953
f4abbc16
MM
954/* See linux-btrace.h. */
955
956enum btrace_error
957linux_disable_btrace (struct btrace_target_info *tinfo)
958{
959 enum btrace_error errcode;
960
961 errcode = BTRACE_ERR_NOT_SUPPORTED;
962 switch (tinfo->conf.format)
963 {
964 case BTRACE_FORMAT_NONE:
965 break;
966
967 case BTRACE_FORMAT_BTS:
968 errcode = linux_disable_bts (&tinfo->variant.bts);
969 break;
b20a6524
MM
970
971 case BTRACE_FORMAT_PT:
972 errcode = linux_disable_pt (&tinfo->variant.pt);
973 break;
f4abbc16
MM
974 }
975
976 if (errcode == BTRACE_ERR_NONE)
977 xfree (tinfo);
978
979 return errcode;
980}
981
734b0e4b
MM
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.

   Returns BTRACE_ERR_NONE on success, BTRACE_ERR_OVERFLOW if a delta read
   cannot be satisfied because the buffer wrapped past our last position.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
		struct btrace_target_info *tinfo,
		enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      VEC_free (btrace_block_s, btrace->blocks);

      if (type == BTRACE_READ_DELTA)
	{
	  __u64 data_size;

	  /* Determine the number of bytes to read and check for buffer
	     overflows.  */

	  /* Check for data head overflows.  We might be able to recover from
	     those but they are very unlikely and it's not really worth the
	     effort, I think.  */
	  if (data_head < data_tail)
	    return BTRACE_ERR_OVERFLOW;

	  /* If the buffer is smaller than the trace delta, we overflowed.  */
	  data_size = data_head - data_tail;
	  if (buffer_size < data_size)
	    return BTRACE_ERR_OVERFLOW;

	  /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
	  size = (size_t) data_size;
	}
      else
	{
	  /* Read the entire buffer.  */
	  size = buffer_size;

	  /* Adjust the size if the buffer has not overflowed, yet.  */
	  if (data_head < size)
	    size = (size_t) data_head;
	}

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
	end = start;
      else
	end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
	 On multi-core systems, the debugger might therefore run while the
	 kernel might be writing the last branch trace records.

	 Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
	break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!VEC_empty (btrace_block_s, btrace->blocks)
      && type != BTRACE_READ_DELTA)
    VEC_pop (btrace_block_s, btrace->blocks);

  return BTRACE_ERR_NONE;
}
1076
/* Fill in the Intel Processor Trace configuration information.
   The decoder needs the tracing CPU's identity to interpret the trace.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}
1084
bc504a31 1085/* Read branch trace data in Intel Processor Trace format for the thread
b20a6524
MM
1086 given by TINFO into BTRACE using the TYPE reading method. */
1087
1088static enum btrace_error
1089linux_read_pt (struct btrace_data_pt *btrace,
1090 struct btrace_target_info *tinfo,
1091 enum btrace_read_type type)
1092{
1093 struct perf_event_buffer *pt;
1094
1095 pt = &tinfo->variant.pt.pt;
1096
1097 linux_fill_btrace_pt_config (&btrace->config);
1098
1099 switch (type)
1100 {
1101 case BTRACE_READ_DELTA:
1102 /* We don't support delta reads. The data head (i.e. aux_head) wraps
1103 around to stay inside the aux buffer. */
1104 return BTRACE_ERR_NOT_SUPPORTED;
1105
1106 case BTRACE_READ_NEW:
1107 if (!perf_event_new_data (pt))
1108 return BTRACE_ERR_NONE;
1109
1110 /* Fall through. */
1111 case BTRACE_READ_ALL:
1112 perf_event_read_all (pt, &btrace->data, &btrace->size);
1113 return BTRACE_ERR_NONE;
1114 }
1115
1116 internal_error (__FILE__, __LINE__, _("Unkown btrace read type."));
1117}
1118
734b0e4b
MM
1119/* See linux-btrace.h. */
1120
1121enum btrace_error
1122linux_read_btrace (struct btrace_data *btrace,
1123 struct btrace_target_info *tinfo,
1124 enum btrace_read_type type)
1125{
f4abbc16
MM
1126 switch (tinfo->conf.format)
1127 {
1128 case BTRACE_FORMAT_NONE:
1129 return BTRACE_ERR_NOT_SUPPORTED;
1130
1131 case BTRACE_FORMAT_BTS:
1132 /* We read btrace in BTS format. */
1133 btrace->format = BTRACE_FORMAT_BTS;
1134 btrace->variant.bts.blocks = NULL;
1135
1136 return linux_read_bts (&btrace->variant.bts, tinfo, type);
b20a6524
MM
1137
1138 case BTRACE_FORMAT_PT:
bc504a31 1139 /* We read btrace in Intel Processor Trace format. */
b20a6524
MM
1140 btrace->format = BTRACE_FORMAT_PT;
1141 btrace->variant.pt.data = NULL;
1142 btrace->variant.pt.size = 0;
1143
1144 return linux_read_pt (&btrace->variant.pt, tinfo, type);
f4abbc16
MM
1145 }
1146
1147 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1148}
1149
1150/* See linux-btrace.h. */
734b0e4b 1151
f4abbc16
MM
1152const struct btrace_config *
1153linux_btrace_conf (const struct btrace_target_info *tinfo)
1154{
1155 return &tinfo->conf;
734b0e4b
MM
1156}
1157
7c97f91e
MM
1158#else /* !HAVE_LINUX_PERF_EVENT_H */
1159
/* See linux-btrace.h.  Stub for hosts without linux/perf_event.h:
   no branch trace format is supported.  */

int
linux_supports_btrace (struct target_ops *ops, enum btrace_format format)
{
  return 0;
}
1167
/* See linux-btrace.h.  Stub for hosts without linux/perf_event.h:
   tracing cannot be enabled.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}
1175
/* See linux-btrace.h.  Stub for hosts without linux/perf_event.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}
1183
/* See linux-btrace.h.  Stub for hosts without linux/perf_event.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
		   struct btrace_target_info *tinfo,
		   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}
1193
/* See linux-btrace.h.  Stub for hosts without linux/perf_event.h:
   there is no configuration to report.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}
1201
7c97f91e 1202#endif /* !HAVE_LINUX_PERF_EVENT_H */