/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2023 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              if (cpu.family == 0xf)
                cpu.family += (cpuid >> 20) & 0xff;

              cpu.model = (cpuid >> 4) & 0xf;
              if ((cpu.family == 0x6) || ((cpu.family & 0xf) == 0xf))
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
      else if (ebx == signature_AMD_ebx && ecx == signature_AMD_ecx
               && edx == signature_AMD_edx)
        cpu.vendor = CV_AMD;
    }

  return cpu;
}
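
/* For illustration (assumed values, not an exhaustive decoding): a CPUID
   leaf 1 EAX of 0x000306a9 yields family (0x306a9 >> 8) & 0xf == 0x6 and
   model ((0x306a9 >> 4) & 0xf) + ((0x306a9 >> 12) & 0xf0) == 0xa + 0x30
   == 0x3a, i.e. an Ivy Bridge cpu in the model table of intel_supports_bts
   below.  */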

/* Return non-zero if there is new data in PEV; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return NULL;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

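  /* Worked example (illustrative numbers): with BUFFER_SIZE == 8,
     DATA_HEAD == 10, and SIZE == 4, we get DATA_TAIL == 6, START == BEGIN + 6,
     and STOP == BEGIN + 2.  Since START >= STOP, the else branch below copies
     two bytes from the buffer's end and two from its beginning to stitch the
     wrapped data back together.  */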
  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in PSIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}
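
/* For reference, a matching /proc/kallsyms line looks roughly like

     ffffffff81000000 T _text

   On many systems the address column is zeroed for unprivileged readers
   (kptr_restrict), in which case KERNEL_START remains 0 and the
   most-significant-bit heuristic below is used instead.  */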

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace that excludes kernel addresses.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
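
/* A small worked example (addresses made up): reading backwards, suppose we
   see s2 = { from = 0x420, to = 0x500 } followed by s1 = { from = 0x110,
   to = 0x400 }.  The rule above forms the block [begin = s1.to = 0x400,
   end = s2.from = 0x420], i.e. the instructions executed sequentially
   between the two recorded branches.  */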

static std::vector<btrace_block> *
perf_event_read_bts (struct btrace_target_info* tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer; we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);

    case CV_AMD:
      return 0;
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[] = "/proc/sys/kernel/perf_event_paranoid";
        errno = 0;
        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          error (_("Failed to open %s (%s).  Your system does not support "
                   "process recording."), filename, safe_strerror (errno));

        int level, found = fscanf (file.get (), "%d", &level);
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}
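
/* For reference (distribution-dependent; shown for illustration only): the
   paranoid level can typically be inspected and lowered from a shell with

     cat /proc/sys/kernel/perf_event_paranoid
     echo 2 | sudo tee /proc/sys/kernel/perf_event_paranoid  */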

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  size_t size, pages;
  __u64 data_offset;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
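
  /* Worked example (illustrative): PAGES == 5 (binary 101) becomes 6 at
     pg == 0 (bit 0 set), then 8 at pg == 1 (bit 1 set), and the loop stops
     at pg == 3 since 8 == 1 << 3; a request of 5 pages thus rounds up to 8.  */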

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[] = "/sys/bus/event_source/devices/intel_pt/type";

  errno = 0;
  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    switch (errno)
      {
      case EACCES:
      case EFAULT:
      case EPERM:
        error (_("Failed to open %s (%s).  You do not have permission "
                 "to use Intel PT."), filename, safe_strerror (errno));

      case ENOTDIR:
      case ENOENT:
        error (_("Failed to open %s (%s).  Your system does not support "
                 "Intel PT."), filename, safe_strerror (errno));

      default:
        error (_("Failed to open %s: %s."), filename, safe_strerror (errno));
      }

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}
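
/* For reference: this sysfs file contains a small kernel-assigned integer,
   e.g. reading it from a shell might print 8 on one system and 9 on
   another; whatever value it holds is what perf_event_open expects in
   attr.type for Intel PT events.  */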

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);
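
  /* (This is the same rounding as in linux_enable_bts above; see the worked
     example there.)  */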

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (pt->header == header);
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}
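
/* A minimal caller sketch (hypothetical values; error handling elided):

     struct btrace_config conf {};
     conf.format = BTRACE_FORMAT_BTS;
     conf.bts.size = 64 * 1024;
     struct btrace_target_info *tinfo = linux_enable_btrace (ptid, &conf);

   The returned handle is later passed to linux_read_btrace and
   linux_disable_btrace.  */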

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head = 0, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current PC.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort, I think.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (_("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (_("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  return NULL;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */