]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/nat/linux-btrace.c
gdb: c++ify btrace_target_info
[thirdparty/binutils-gdb.git] / gdb / nat / linux-btrace.c
CommitLineData
7c97f91e
MM
1/* Linux-dependent part of branch trace support for GDB, and GDBserver.
2
213516ef 3 Copyright (C) 2013-2023 Free Software Foundation, Inc.
7c97f91e
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
268a13a5 22#include "gdbsupport/common-defs.h"
7c97f91e 23#include "linux-btrace.h"
268a13a5
TT
24#include "gdbsupport/common-regcache.h"
25#include "gdbsupport/gdb_wait.h"
df7e5265 26#include "x86-cpuid.h"
268a13a5
TT
27#include "gdbsupport/filestuff.h"
28#include "gdbsupport/scoped_fd.h"
29#include "gdbsupport/scoped_mmap.h"
0568462b
MM
30
31#include <inttypes.h>
7c97f91e 32
5b4e221c 33#include <sys/syscall.h>
5b4e221c
MF
34
35#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
7c97f91e 36#include <unistd.h>
7c97f91e
MM
37#include <sys/mman.h>
38#include <sys/user.h>
5826e159 39#include "nat/gdb_ptrace.h"
a950d57c 40#include <sys/types.h>
a950d57c 41#include <signal.h>
7c97f91e
MM
42
/* A branch trace record in perf_event.  Matches the sample layout requested
   with PERF_SAMPLE_IP | PERF_SAMPLE_ADDR in linux_enable_bts.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
52
/* A perf_event branch trace sample: the generic perf_event record header
   followed by the BTS payload.  perf_event_sample_ok relies on records
   having exactly this size.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};
62
afb778a2
MM
/* Identify the cpu we're running on.  Uses CPUID leaf 0 to detect the
   vendor and, for Intel, leaf 1 to extract family and model.  Returns a
   zeroed btrace_cpu if CPUID is not available.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  /* Leaf 0 returns the vendor identification string in EBX:EDX:ECX.  */
  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
	  && edx == signature_INTEL_edx)
	{
	  unsigned int cpuid, ignore;

	  /* Leaf 1 returns version information (family/model/stepping)
	     in EAX.  */
	  ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
	  if (ok != 0)
	    {
	      cpu.vendor = CV_INTEL;

	      cpu.family = (cpuid >> 8) & 0xf;
	      /* Family 0xf adds in the extended family bits.  */
	      if (cpu.family == 0xf)
		cpu.family += (cpuid >> 20) & 0xff;

	      cpu.model = (cpuid >> 4) & 0xf;
	      /* Families 0x6 and 0xf extend the model with the extended
		 model bits.  */
	      if ((cpu.family == 0x6) || ((cpu.family & 0xf) == 0xf))
		cpu.model += (cpuid >> 12) & 0xf0;
	    }
	}
      else if (ebx == signature_AMD_ebx && ecx == signature_AMD_ecx
	       && edx == signature_AMD_edx)
	cpu.vendor = CV_AMD;
    }

  return cpu;
}
102
aadf7753 103/* Return non-zero if there is new data in PEVENT; zero otherwise. */
7c97f91e 104
aadf7753
MM
105static int
106perf_event_new_data (const struct perf_event_buffer *pev)
7c97f91e 107{
aadf7753 108 return *pev->data_head != pev->last_head;
7c97f91e
MM
109}
110
b20a6524
MM
/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
		 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      /* The requested range wraps around: copy the tail of the ring buffer
	 first, then the part that wrapped to the beginning.  */
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}
163
/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.
   Updates PEV->LAST_HEAD so subsequent delta reads start after this
   read.  The caller owns and must free *DATA.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
		     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}
182
0568462b
MM
/* Try to determine the start address of the Linux kernel.

   Scans /proc/kallsyms for the "_text" symbol.  Returns 0 if the address
   cannot be determined (e.g. the file is unreadable or the symbol is
   hidden).  The result is computed once and cached for subsequent
   calls.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  /* Mark as cached up front; if anything below fails we cache the
     "unknown" (zero) result rather than re-trying on every call.  */
  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
	break;

      /* Each line is "<address> <type> <symbol>"; we only care about
	 text symbols (type 't' or 'T').  */
      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
	continue;

      if (strcmp (symbol, "_text") == 0)
	{
	  kernel_start = addr;
	  break;
	}
    }

  return kernel_start;
}
223
7c97f91e
MM
224/* Check whether an address is in the kernel. */
225
226static inline int
0568462b 227perf_event_is_kernel_addr (uint64_t addr)
7c97f91e 228{
0568462b 229 uint64_t kernel_start;
7c97f91e 230
0568462b
MM
231 kernel_start = linux_determine_kernel_start ();
232 if (kernel_start != 0ull)
233 return (addr >= kernel_start);
7c97f91e 234
0568462b
MM
235 /* If we don't know the kernel's start address, let's check the most
236 significant bit. This will work at least for 64-bit kernels. */
237 return ((addr & (1ull << 63)) != 0);
7c97f91e
MM
238}
239
/* Check whether a perf event record should be skipped.
   Returns non-zero if BTS describes a branch originating in the kernel.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}
250
251/* Perform a few consistency checks on a perf event sample record. This is
252 meant to catch cases when we get out of sync with the perf event stream. */
253
254static inline int
255perf_event_sample_ok (const struct perf_event_sample *sample)
256{
257 if (sample->header.type != PERF_RECORD_SAMPLE)
258 return 0;
259
260 if (sample->header.size != sizeof (*sample))
261 return 0;
262
263 return 1;
264}
265
/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.

   Returns a heap-allocated vector of blocks in reverse chronological order;
   the caller owns it.  */

static std::vector<btrace_block> *
perf_event_read_bts (btrace_target_info *tinfo, const uint8_t *begin,
		     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
	psample = (const struct perf_event_sample *) start;
      else
	{
	  int missing;

	  /* We're to the left of the ring buffer, we will wrap around and
	     reappear at the very right of the ring buffer.  */

	  missing = (begin - start);
	  start = (end - missing);

	  /* If the entire sample is missing, we're done.  */
	  if (missing == sizeof (sample))
	    psample = (const struct perf_event_sample *) start;
	  else
	    {
	      uint8_t *stack;

	      /* The sample wrapped around.  The lower part is at the end and
		 the upper part is at the beginning of the buffer.  */
	      stack = (uint8_t *) &sample;

	      /* Copy the two parts so we have a contiguous sample.  */
	      memcpy (stack, start, missing);
	      memcpy (stack + missing, begin, sizeof (sample) - missing);

	      psample = &sample;
	    }
	}

      if (!perf_event_sample_ok (psample))
	{
	  warning (_("Branch trace may be incomplete."));
	  break;
	}

      /* Skip branches originating in the kernel.  */
      if (perf_event_skip_bts_record (&psample->bts))
	continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}
366
/* Check whether an Intel cpu supports BTS.  Returns zero only for models
   with a known erratum that makes BTS unreliable; assumes support
   otherwise.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
	{
	case 0x1a: /* Nehalem */
	case 0x1f:
	case 0x1e:
	case 0x2e:
	case 0x25: /* Westmere */
	case 0x2c:
	case 0x2f:
	case 0x2a: /* Sandy Bridge */
	case 0x2d:
	case 0x3a: /* Ivy Bridge */

	  /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
	     "from" information after an EIST transition, T-states, C1E, or
	     Adaptive Thermal Throttling.  */
	  return 0;
	}
    }

  return 1;
}
397
/* Check whether the cpu supports BTS.  Returns non-zero if BTS recording
   should be attempted on this host.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);

    case CV_AMD:
      /* AMD cpus do not support BTS.  */
      return 0;
    }
}
419
88711fbf
MM
/* The perf_event_open syscall failed.  Try to print a helpful error
   message based on ERRNO.  This function does not return; it always
   throws via error ().  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
	/* Permission errors usually come from the perf_event_paranoid
	   sysctl; read it to give actionable advice.  */
	static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
	errno = 0;
	gdb_file_up file = gdb_fopen_cloexec (filename, "r");
	if (file.get () == nullptr)
	  error (_("Failed to open %s (%s).  Your system does not support "
		   "process recording."), filename, safe_strerror (errno));

	int level, found = fscanf (file.get (), "%d", &level);
	if (found == 1 && level > 2)
	  error (_("You do not have permission to record the process.  "
		   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  /* Fall back to a generic error message.  */
  error (_("Failed to start recording: %s"), safe_strerror (errno));
}
449
cdda72c2
MM
/* Get the linux version of a btrace_target_info.

   All target infos created by this file are linux_btrace_target_info
   instances, so the downcast is expected to be safe; checked_static_cast
   verifies it.  */

static linux_btrace_target_info *
get_linux_btrace_target_info (btrace_target_info *gtinfo)
{
  return gdb::checked_static_cast<linux_btrace_target_info *> (gtinfo);
}
457
/* Enable branch tracing in BTS format.

   PTID identifies the thread to trace; CONF carries the requested buffer
   size.  Opens a perf event counting branch instructions with a sample
   period of 1 and maps its ring buffer.  Returns a heap-allocated target
   info on success; throws on failure.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  std::unique_ptr<linux_btrace_target_info> tinfo
    { gdb::make_unique<linux_btrace_target_info> (ptid) };

  tinfo->conf.format = BTRACE_FORMAT_BTS;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = PERF_TYPE_HARDWARE;
  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  tinfo->attr.sample_period = 1;

  /* We sample from and to address.  */
  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  /* Prefer the lwp; fall back to the pid for the main thread.  */
  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
	   + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
	continue;

      size = (size_t) data_size;
      /* The mapping includes one extra page for the perf_event_mmap_page
	 header in front of the data buffer.  */
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
	continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
	break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  /* Newer kernels report the data buffer's offset and size in the mmap
     header; use those if available.  */
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
	error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  tinfo->pev.size = size;
  tinfo->pev.data_head = &header->data_head;
  tinfo->pev.mem = (const uint8_t *) data.release () + data_offset;
  tinfo->pev.last_head = 0ull;
  tinfo->header = header;
  tinfo->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}
570
571#if defined (PERF_ATTR_SIZE_VER5)
572
/* Determine the event type.

   Reads the Intel PT PMU type from sysfs.  Throws a descriptive error if
   the file is missing (no Intel PT support) or unreadable (insufficient
   permission).  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    switch (errno)
      {
      case EACCES:
      case EFAULT:
      case EPERM:
	error (_("Failed to open %s (%s).  You do not have permission "
		 "to use Intel PT."), filename, safe_strerror (errno));

      case ENOTDIR:
      case ENOENT:
	error (_("Failed to open %s (%s).  Your system does not support "
		 "Intel PT."), filename, safe_strerror (errno));

      default:
	error (_("Failed to open %s: %s."), filename, safe_strerror (errno));
      }

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}
606
/* Enable branch tracing in Intel Processor Trace format.

   PTID identifies the thread to trace; CONF carries the requested aux
   buffer size.  Maps the perf configuration page read/write (to set
   aux_offset/aux_size) and the aux buffer read-only.  Returns a
   heap-allocated target info on success; throws on failure.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  size_t pages;
  int pid, pg;

  /* Prefer the lwp; fall back to the pid for the main thread.  */
  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  std::unique_ptr<linux_btrace_target_info> tinfo
    { gdb::make_unique<linux_btrace_target_info> (ptid) };

  tinfo->conf.format = BTRACE_FORMAT_PT;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = perf_event_pt_event_type ();

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
		    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  /* Place the aux buffer right behind the data buffer.  */
  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
	   + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
	continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
	continue;

      /* The kernel requires aux_size to be set before mapping the aux
	 buffer.  */
      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
		 header->aux_offset);
      if (aux.get () != MAP_FAILED)
	break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  tinfo->pev.size = aux.size ();
  tinfo->pev.mem = (const uint8_t *) aux.release ();
  tinfo->pev.data_head = &header->aux_head;
  tinfo->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (tinfo->header == header);
  tinfo->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) tinfo->pev.size;
  return tinfo.release ();
}
702
b20a6524
MM
703#else /* !defined (PERF_ATTR_SIZE_VER5) */
704
/* Stub for kernels whose perf_event ABI predates PERF_ATTR_SIZE_VER5:
   Intel PT cannot be supported, so always throw.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}
710
711#endif /* !defined (PERF_ATTR_SIZE_VER5) */
712
7c97f91e
MM
/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  /* Dispatch on the requested trace format; the per-format helpers throw
     on failure.  */
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}
733
/* Disable BTS tracing.  Unmaps the header page together with the data
   buffer (they were mapped as one region) and closes the perf event fd.  */

static void
linux_disable_bts (struct linux_btrace_target_info *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->pev.size + PAGE_SIZE);
  close (tinfo->file);
}
742
/* Disable Intel Processor Trace tracing.  Unmaps the aux buffer and the
   configuration page (mapped separately in linux_enable_pt) and closes the
   perf event fd.  */

static void
linux_disable_pt (struct linux_btrace_target_info *tinfo)
{
  munmap ((void *) tinfo->pev.mem, tinfo->pev.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);
}
752
f4abbc16
MM
/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *gtinfo)
{
  linux_btrace_target_info *tinfo
    = get_linux_btrace_target_info (gtinfo);

  /* Release the format-specific resources, then the target info itself.  */
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      linux_disable_bts (tinfo);
      delete tinfo;
      return BTRACE_ERR_NONE;

    case BTRACE_FORMAT_PT:
      linux_disable_pt (tinfo);
      delete tinfo;
      return BTRACE_ERR_NONE;
    }

  /* Not reached for any known format; keep a safe fallback.  */
  return BTRACE_ERR_NOT_SUPPORTED;
}
779
734b0e4b
MM
/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.

   On success, BTRACE->BLOCKS owns a freshly allocated vector.  Returns
   BTRACE_ERR_OVERFLOW for delta reads that cannot be satisfied from the
   ring buffer.  */

static enum btrace_error
linux_read_bts (btrace_data_bts *btrace, linux_btrace_target_info *tinfo,
		enum btrace_read_type type)
{
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head = 0, data_tail;
  unsigned int retries = 5;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (&tinfo->pev))
    return BTRACE_ERR_NONE;

  buffer_size = tinfo->pev.size;
  data_tail = tinfo->pev.last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *tinfo->pev.data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
	{
	  __u64 data_size;

	  /* Determine the number of bytes to read and check for buffer
	     overflows.  */

	  /* Check for data head overflows.  We might be able to recover from
	     those but they are very unlikely and it's not really worth the
	     effort, I think.  */
	  if (data_head < data_tail)
	    return BTRACE_ERR_OVERFLOW;

	  /* If the buffer is smaller than the trace delta, we overflowed.  */
	  data_size = data_head - data_tail;
	  if (buffer_size < data_size)
	    return BTRACE_ERR_OVERFLOW;

	  /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
	  size = (size_t) data_size;
	}
      else
	{
	  /* Read the entire buffer.  */
	  size = buffer_size;

	  /* Adjust the size if the buffer has not overflowed, yet.  */
	  if (data_head < size)
	    size = (size_t) data_head;
	}

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = tinfo->pev.mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
	end = start;
      else
	end = begin + tinfo->pev.size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
	 On multi-core systems, the debugger might therefore run while the
	 kernel might be writing the last branch trace records.

	 Let's check whether the data head moved while we read the trace.  */
      if (data_head == *tinfo->pev.data_head)
	break;
    }

  tinfo->pev.last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}
870
/* Fill in the Intel Processor Trace configuration information.
   The decoder needs the cpu identity to work around model-specific
   quirks.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}
878
/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.

   On success, BTRACE->DATA owns a freshly allocated buffer of raw PT
   packets.  Delta reads are not supported for PT.  */

static enum btrace_error
linux_read_pt (btrace_data_pt *btrace, linux_btrace_target_info *tinfo,
	       enum btrace_read_type type)
{
  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
	 around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (&tinfo->pev))
	return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (&tinfo->pev, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (_("Unknown btrace read type."));
}
907
734b0e4b
MM
908/* See linux-btrace.h. */
909
910enum btrace_error
911linux_read_btrace (struct btrace_data *btrace,
cdda72c2 912 struct btrace_target_info *gtinfo,
734b0e4b
MM
913 enum btrace_read_type type)
914{
cdda72c2
MM
915 linux_btrace_target_info *tinfo
916 = get_linux_btrace_target_info (gtinfo);
917
f4abbc16
MM
918 switch (tinfo->conf.format)
919 {
920 case BTRACE_FORMAT_NONE:
921 return BTRACE_ERR_NOT_SUPPORTED;
922
923 case BTRACE_FORMAT_BTS:
924 /* We read btrace in BTS format. */
925 btrace->format = BTRACE_FORMAT_BTS;
926 btrace->variant.bts.blocks = NULL;
927
928 return linux_read_bts (&btrace->variant.bts, tinfo, type);
b20a6524
MM
929
930 case BTRACE_FORMAT_PT:
bc504a31 931 /* We read btrace in Intel Processor Trace format. */
b20a6524
MM
932 btrace->format = BTRACE_FORMAT_PT;
933 btrace->variant.pt.data = NULL;
934 btrace->variant.pt.size = 0;
935
936 return linux_read_pt (&btrace->variant.pt, tinfo, type);
f4abbc16
MM
937 }
938
f34652de 939 internal_error (_("Unkown branch trace format."));
f4abbc16
MM
940}
941
/* See linux-btrace.h.  Returns the configuration stored at enable time;
   the pointer remains owned by TINFO.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}
949
7c97f91e
MM
950#else /* !HAVE_LINUX_PERF_EVENT_H */
951
/* See linux-btrace.h.  Stub for hosts without <linux/perf_event.h>:
   branch tracing is unavailable, so enabling always fails.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}
959
/* See linux-btrace.h.  Stub for hosts without <linux/perf_event.h>.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}
967
/* See linux-btrace.h.  Stub for hosts without <linux/perf_event.h>.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
		   struct btrace_target_info *tinfo,
		   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}
977
f4abbc16
MM
/* See linux-btrace.h.  Stub for hosts without <linux/perf_event.h>.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}
985
7c97f91e 986#endif /* !HAVE_LINUX_PERF_EVENT_H */