/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013-2023 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "gdbsupport/common-defs.h"
#include "linux-btrace.h"
#include "gdbsupport/common-regcache.h"
#include "gdbsupport/gdb_wait.h"
#include "x86-cpuid.h"
#include "gdbsupport/filestuff.h"
#include "gdbsupport/scoped_fd.h"
#include "gdbsupport/scoped_mmap.h"

#include <inttypes.h>

#include <sys/syscall.h>

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include "nat/gdb_ptrace.h"
#include <sys/types.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

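/* Note: this layout matches the BTS sampling configuration requested in
   linux_enable_bts below (sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR):
   each PERF_RECORD_SAMPLE stores the branch source in its ip field and the
   branch destination in its addr field.  */
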
/* Identify the cpu we're running on.  */
static struct btrace_cpu
btrace_this_cpu (void)
{
  struct btrace_cpu cpu;
  unsigned int eax, ebx, ecx, edx;
  int ok;

  memset (&cpu, 0, sizeof (cpu));

  ok = x86_cpuid (0, &eax, &ebx, &ecx, &edx);
  if (ok != 0)
    {
      if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
          && edx == signature_INTEL_edx)
        {
          unsigned int cpuid, ignore;

          ok = x86_cpuid (1, &cpuid, &ignore, &ignore, &ignore);
          if (ok != 0)
            {
              cpu.vendor = CV_INTEL;

              cpu.family = (cpuid >> 8) & 0xf;
              if (cpu.family == 0xf)
                cpu.family += (cpuid >> 20) & 0xff;

              cpu.model = (cpuid >> 4) & 0xf;
              if ((cpu.family == 0x6) || ((cpu.family & 0xf) == 0xf))
                cpu.model += (cpuid >> 12) & 0xf0;
            }
        }
      else if (ebx == signature_AMD_ebx && ecx == signature_AMD_ecx
               && edx == signature_AMD_edx)
        cpu.vendor = CV_AMD;
    }

  return cpu;
}

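/* For illustration (not an exhaustive decode): an Ivy Bridge signature of
   cpuid = 0x306a9 yields family = (cpuid >> 8) & 0xf = 0x6 and model
   = ((cpuid >> 4) & 0xf) + ((cpuid >> 12) & 0xf0) = 0xa + 0x30 = 0x3a,
   one of the models listed in intel_supports_bts below.  */
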
/* Return non-zero if there is new data in PEVENT; zero otherwise.  */

static int
perf_event_new_data (const struct perf_event_buffer *pev)
{
  return *pev->data_head != pev->last_head;
}

/* Copy the last SIZE bytes from PEV ending at DATA_HEAD and return a pointer
   to the memory holding the copy.
   The caller is responsible for freeing the memory.  */

static gdb_byte *
perf_event_read (const struct perf_event_buffer *pev, __u64 data_head,
                 size_t size)
{
  const gdb_byte *begin, *end, *start, *stop;
  gdb_byte *buffer;
  size_t buffer_size;
  __u64 data_tail;

  if (size == 0)
    return nullptr;

  /* We should never ask for more data than the buffer can hold.  */
  buffer_size = pev->size;
  gdb_assert (size <= buffer_size);

  /* If we ask for more data than we seem to have, we wrap around and read
     data from the end of the buffer.  This is already handled by the %
     BUFFER_SIZE operation, below.  Here, we just need to make sure that we
     don't underflow.

     Note that this is perfectly OK for perf event buffers where data_head
     doesn't grow indefinitely and instead wraps around to remain within the
     buffer's boundaries.  */
  if (data_head < size)
    data_head += buffer_size;

  gdb_assert (size <= data_head);
  data_tail = data_head - size;

  begin = pev->mem;
  start = begin + data_tail % buffer_size;
  stop = begin + data_head % buffer_size;

  buffer = (gdb_byte *) xmalloc (size);

  if (start < stop)
    memcpy (buffer, start, stop - start);
  else
    {
      end = begin + buffer_size;

      memcpy (buffer, start, end - start);
      memcpy (buffer + (end - start), begin, stop - begin);
    }

  return buffer;
}

/* Copy the perf event buffer data from PEV.
   Store a pointer to the copy into DATA and its size in SIZE.  */

static void
perf_event_read_all (struct perf_event_buffer *pev, gdb_byte **data,
                     size_t *psize)
{
  size_t size;
  __u64 data_head;

  data_head = *pev->data_head;
  size = pev->size;

  *data = perf_event_read (pev, data_head, size);
  *psize = size;

  pev->last_head = data_head;
}

/* Try to determine the start address of the Linux kernel.  */

static uint64_t
linux_determine_kernel_start (void)
{
  static uint64_t kernel_start;
  static int cached;

  if (cached != 0)
    return kernel_start;

  cached = 1;

  gdb_file_up file = gdb_fopen_cloexec ("/proc/kallsyms", "r");
  if (file == NULL)
    return kernel_start;

  while (!feof (file.get ()))
    {
      char buffer[1024], symbol[8], *line;
      uint64_t addr;
      int match;

      line = fgets (buffer, sizeof (buffer), file.get ());
      if (line == NULL)
        break;

      match = sscanf (line, "%" SCNx64 " %*[tT] %7s", &addr, symbol);
      if (match != 2)
        continue;

      if (strcmp (symbol, "_text") == 0)
        {
          kernel_start = addr;
          break;
        }
    }

  return kernel_start;
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (uint64_t addr)
{
  uint64_t kernel_start;

  kernel_start = linux_determine_kernel_start ();
  if (kernel_start != 0ull)
    return (addr >= kernel_start);

  /* If we don't know the kernel's start address, let's check the most
     significant bit.  This will work at least for 64-bit kernels.  */
  return ((addr & (1ull << 63)) != 0);
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_bts_record (const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   Start points into that buffer at the next sample position.
   We read the collected samples backwards from start.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */

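/* For example (illustrative addresses): a sample s1 = { from = 0x1000,
   to = 0x2000 } followed by s2 = { from = 0x4000, to = 0x5000 } yields the
   block [0x2000; 0x4000), i.e. the code executed sequentially between the
   branch that entered it and the branch that left it.  */
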
static std::vector<btrace_block> *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start, size_t size)
{
  std::vector<btrace_block> *btrace = new std::vector<btrace_block>;
  struct perf_event_sample sample;
  size_t read = 0;
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
  regcache = get_thread_regcache_for_ptid (tinfo->ptid);
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, we're done.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */
          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, we're done.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_bts_record (&psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      btrace->push_back (block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  /* Push the last block (i.e. the first one of inferior execution), as well.
     We don't know where it ends, but we know where it starts.  If we're
     reading delta trace, we can fill in the start address later on.
     Otherwise we will prune it.  */
  block.begin = 0;
  btrace->push_back (block);

  return btrace;
}

/* Check whether an Intel cpu supports BTS.  */

static int
intel_supports_bts (const struct btrace_cpu *cpu)
{
  switch (cpu->family)
    {
    case 0x6:
      switch (cpu->model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports BTS.  */

static int
cpu_supports_bts (void)
{
  struct btrace_cpu cpu;

  cpu = btrace_this_cpu ();
  switch (cpu.vendor)
    {
    default:
      /* Don't know about others.  Let's assume they do.  */
      return 1;

    case CV_INTEL:
      return intel_supports_bts (&cpu);

    case CV_AMD:
      return 0;
    }
}

/* The perf_event_open syscall failed.  Try to print a helpful error
   message.  */

static void
diagnose_perf_event_open_fail ()
{
  switch (errno)
    {
    case EPERM:
    case EACCES:
      {
        static const char filename[]
          = "/proc/sys/kernel/perf_event_paranoid";

        gdb_file_up file = gdb_fopen_cloexec (filename, "r");
        if (file.get () == nullptr)
          error (_("Failed to open %s (%s).  Your system does not support "
                   "process recording."), filename, safe_strerror (errno));

        int level, found = fscanf (file.get (), "%d", &level);
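        /* perf_event_paranoid = 2 still permits user-space-only recording;
           values above 2 (used by some kernels and distributions) disable
           perf_event_open for unprivileged users entirely, hence the check
           below.  */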
        if (found == 1 && level > 2)
          error (_("You do not have permission to record the process.  "
                   "Try setting %s to 2 or less."), filename);
      }

      break;
    }

  error (_("Failed to start recording: %s"), safe_strerror (errno));
}

/* Enable branch tracing in BTS format.  */

static struct btrace_target_info *
linux_enable_bts (ptid_t ptid, const struct btrace_config_bts *conf)
{
  struct btrace_tinfo_bts *bts;
  __u64 data_offset;
  size_t size, pages;
  int pid, pg;

  if (!cpu_supports_bts ())
    error (_("BTS support has been disabled for the target cpu."));

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_BTS;
  bts = &tinfo->variant.bts;

  bts->attr.size = sizeof (bts->attr);
  bts->attr.type = PERF_TYPE_HARDWARE;
  bts->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
  bts->attr.sample_period = 1;

  /* We sample from and to address.  */
  bts->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  bts->attr.exclude_kernel = 1;
  bts->attr.exclude_hv = 1;
  bts->attr.exclude_idle = 1;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &bts->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

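  /* For example (illustrative): PAGES = 5 (binary 101) is bumped to 6 at
     pg = 0 and to 8 at pg = 1; the loop then stops once PAGES equals
     1 << 3, the next power of two.  */
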
  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap data;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      size = (size_t) data_size;
      length = size + PAGE_SIZE;

      /* Check for overflows.  */
      if ((__u64) length != data_size + PAGE_SIZE)
        continue;

      errno = 0;
      /* The number of pages we request needs to be a power of two.  */
      data.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (), 0);
      if (data.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();
  data_offset = PAGE_SIZE;

#if defined (PERF_ATTR_SIZE_VER5)
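  /* The kernel reports the size of its struct perf_event_mmap_page in
     header->size.  If it is large enough to cover the data_size member,
     the data_offset and data_size fields exist and may be read; older
     kernels lack them and we keep the defaults set above.  */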
  if (offsetof (struct perf_event_mmap_page, data_size) <= header->size)
    {
      __u64 data_size;

      data_offset = header->data_offset;
      data_size = header->data_size;

      size = (unsigned int) data_size;

      /* Check for overflows.  */
      if ((__u64) size != data_size)
        error (_("Failed to determine trace buffer size."));
    }
#endif /* defined (PERF_ATTR_SIZE_VER5) */

  bts->bts.size = size;
  bts->bts.data_head = &header->data_head;
  bts->bts.mem = (const uint8_t *) data.release () + data_offset;
  bts->bts.last_head = 0ull;
  bts->header = header;
  bts->file = fd.release ();

  tinfo->conf.bts.size = (unsigned int) size;
  return tinfo.release ();
}

#if defined (PERF_ATTR_SIZE_VER5)

/* Determine the event type.  */

static int
perf_event_pt_event_type ()
{
  static const char filename[]
    = "/sys/bus/event_source/devices/intel_pt/type";

  gdb_file_up file = gdb_fopen_cloexec (filename, "r");
  if (file.get () == nullptr)
    switch (errno)
      {
      case EACCES:
      case EFAULT:
      case EPERM:
        error (_("Failed to open %s (%s).  You do not have permission "
                 "to use Intel PT."), filename, safe_strerror (errno));

      case ENOTDIR:
      case ENOENT:
        error (_("Failed to open %s (%s).  Your system does not support "
                 "Intel PT."), filename, safe_strerror (errno));

      default:
        error (_("Failed to open %s: %s."), filename, safe_strerror (errno));
      }

  int type, found = fscanf (file.get (), "%d", &type);
  if (found != 1)
    error (_("Failed to read the PT event type from %s."), filename);

  return type;
}

/* Enable branch tracing in Intel Processor Trace format.  */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  struct btrace_tinfo_pt *pt;
  size_t pages;
  int pid, pg;

  pid = ptid.lwp ();
  if (pid == 0)
    pid = ptid.pid ();

  gdb::unique_xmalloc_ptr<btrace_target_info> tinfo
    (XCNEW (btrace_target_info));
  tinfo->ptid = ptid;

  tinfo->conf.format = BTRACE_FORMAT_PT;
  pt = &tinfo->variant.pt;

  pt->attr.size = sizeof (pt->attr);
  pt->attr.type = perf_event_pt_event_type ();

  pt->attr.exclude_kernel = 1;
  pt->attr.exclude_hv = 1;
  pt->attr.exclude_idle = 1;

  errno = 0;
  scoped_fd fd (syscall (SYS_perf_event_open, &pt->attr, pid, -1, -1, 0));
  if (fd.get () < 0)
    diagnose_perf_event_open_fail ();

  /* Allocate the configuration page.  */
  scoped_mmap data (nullptr, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd.get (), 0);
  if (data.get () == MAP_FAILED)
    error (_("Failed to map trace user page: %s."), safe_strerror (errno));

  struct perf_event_mmap_page *header = (struct perf_event_mmap_page *)
    data.get ();

  header->aux_offset = header->data_offset + header->data_size;

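  /* The aux area is mapped separately from the configuration page;
     aux_offset and aux_size must be filled in before the aux mmap below
     for the kernel to accept it.  We place the aux area directly behind
     the data buffer.  */
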
  /* Convert the requested size in bytes to pages (rounding up).  */
  pages = ((size_t) conf->size / PAGE_SIZE
           + ((conf->size % PAGE_SIZE) == 0 ? 0 : 1));
  /* We need at least one page.  */
  if (pages == 0)
    pages = 1;

  /* The buffer size can be requested in powers of two pages.  Adjust PAGES
     to the next power of two.  */
  for (pg = 0; pages != ((size_t) 1 << pg); ++pg)
    if ((pages & ((size_t) 1 << pg)) != 0)
      pages += ((size_t) 1 << pg);

  /* We try to allocate the requested size.
     If that fails, try to get as much as we can.  */
  scoped_mmap aux;
  for (; pages > 0; pages >>= 1)
    {
      size_t length;
      __u64 data_size;

      data_size = (__u64) pages * PAGE_SIZE;

      /* Don't ask for more than we can represent in the configuration.  */
      if ((__u64) UINT_MAX < data_size)
        continue;

      length = (size_t) data_size;

      /* Check for overflows.  */
      if ((__u64) length != data_size)
        continue;

      header->aux_size = data_size;

      errno = 0;
      aux.reset (nullptr, length, PROT_READ, MAP_SHARED, fd.get (),
                 header->aux_offset);
      if (aux.get () != MAP_FAILED)
        break;
    }

  if (pages == 0)
    error (_("Failed to map trace buffer: %s."), safe_strerror (errno));

  pt->pt.size = aux.size ();
  pt->pt.mem = (const uint8_t *) aux.release ();
  pt->pt.data_head = &header->aux_head;
  pt->header = (struct perf_event_mmap_page *) data.release ();
  gdb_assert (pt->header == header);
  pt->file = fd.release ();

  tinfo->conf.pt.size = (unsigned int) pt->pt.size;
  return tinfo.release ();
}

#else /* !defined (PERF_ATTR_SIZE_VER5) */

static struct btrace_target_info *
linux_enable_pt (ptid_t ptid, const struct btrace_config_pt *conf)
{
  error (_("Intel Processor Trace support was disabled at compile time."));
}

#endif /* !defined (PERF_ATTR_SIZE_VER5) */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      error (_("Bad branch trace format."));

    default:
      error (_("Unknown branch trace format."));

    case BTRACE_FORMAT_BTS:
      return linux_enable_bts (ptid, &conf->bts);

    case BTRACE_FORMAT_PT:
      return linux_enable_pt (ptid, &conf->pt);
    }
}

/* Disable BTS tracing.  */

static enum btrace_error
linux_disable_bts (struct btrace_tinfo_bts *tinfo)
{
  munmap ((void *) tinfo->header, tinfo->bts.size + PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* Disable Intel Processor Trace tracing.  */

static enum btrace_error
linux_disable_pt (struct btrace_tinfo_pt *tinfo)
{
  munmap ((void *) tinfo->pt.mem, tinfo->pt.size);
  munmap ((void *) tinfo->header, PAGE_SIZE);
  close (tinfo->file);

  return BTRACE_ERR_NONE;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  enum btrace_error errcode;

  errcode = BTRACE_ERR_NOT_SUPPORTED;
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      break;

    case BTRACE_FORMAT_BTS:
      errcode = linux_disable_bts (&tinfo->variant.bts);
      break;

    case BTRACE_FORMAT_PT:
      errcode = linux_disable_pt (&tinfo->variant.pt);
      break;
    }

  if (errcode == BTRACE_ERR_NONE)
    xfree (tinfo);

  return errcode;
}

/* Read branch trace data in BTS format for the thread given by TINFO into
   BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_bts (struct btrace_data_bts *btrace,
                struct btrace_target_info *tinfo,
                enum btrace_read_type type)
{
  struct perf_event_buffer *pevent;
  const uint8_t *begin, *end, *start;
  size_t buffer_size, size;
  __u64 data_head = 0, data_tail;
  unsigned int retries = 5;

  pevent = &tinfo->variant.bts.bts;

  /* For delta reads, we return at least the partial last block containing
     the current pc.  */
  if (type == BTRACE_READ_NEW && !perf_event_new_data (pevent))
    return BTRACE_ERR_NONE;

  buffer_size = pevent->size;
  data_tail = pevent->last_head;

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = *pevent->data_head;

      /* Delete any leftover trace from the previous iteration.  */
      delete btrace->blocks;
      btrace->blocks = nullptr;

      if (type == BTRACE_READ_DELTA)
        {
          __u64 data_size;

          /* Determine the number of bytes to read and check for buffer
             overflows.  */

          /* Check for data head overflows.  We might be able to recover from
             those but they are very unlikely and it's not really worth the
             effort.  */
          if (data_head < data_tail)
            return BTRACE_ERR_OVERFLOW;

          /* If the buffer is smaller than the trace delta, we overflowed.  */
          data_size = data_head - data_tail;
          if (buffer_size < data_size)
            return BTRACE_ERR_OVERFLOW;

          /* DATA_SIZE <= BUFFER_SIZE and therefore fits into a size_t.  */
          size = (size_t) data_size;
        }
      else
        {
          /* Read the entire buffer.  */
          size = buffer_size;

          /* Adjust the size if the buffer has not overflowed, yet.  */
          if (data_head < size)
            size = (size_t) data_head;
        }

      /* Data_head keeps growing; the buffer itself is circular.  */
      begin = pevent->mem;
      start = begin + data_head % buffer_size;

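      /* If the buffer has not wrapped around yet, only [begin; start)
         contains valid trace data; once data_head exceeds the buffer size,
         the entire mapping does.  */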
      if (data_head <= buffer_size)
        end = start;
      else
        end = begin + pevent->size;

      btrace->blocks = perf_event_read_bts (tinfo, begin, end, start, size);

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel might be writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == *pevent->data_head)
        break;
    }

  pevent->last_head = data_head;

  /* Prune the incomplete last block (i.e. the first one of inferior execution)
     if we're not doing a delta read.  There is no way of filling in its zeroed
     BEGIN element.  */
  if (!btrace->blocks->empty () && type != BTRACE_READ_DELTA)
    btrace->blocks->pop_back ();

  return BTRACE_ERR_NONE;
}

/* Fill in the Intel Processor Trace configuration information.  */

static void
linux_fill_btrace_pt_config (struct btrace_data_pt_config *conf)
{
  conf->cpu = btrace_this_cpu ();
}

/* Read branch trace data in Intel Processor Trace format for the thread
   given by TINFO into BTRACE using the TYPE reading method.  */

static enum btrace_error
linux_read_pt (struct btrace_data_pt *btrace,
               struct btrace_target_info *tinfo,
               enum btrace_read_type type)
{
  struct perf_event_buffer *pt;

  pt = &tinfo->variant.pt.pt;

  linux_fill_btrace_pt_config (&btrace->config);

  switch (type)
    {
    case BTRACE_READ_DELTA:
      /* We don't support delta reads.  The data head (i.e. aux_head) wraps
         around to stay inside the aux buffer.  */
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_READ_NEW:
      if (!perf_event_new_data (pt))
        return BTRACE_ERR_NONE;

      /* Fall through.  */
    case BTRACE_READ_ALL:
      perf_event_read_all (pt, &btrace->data, &btrace->size);
      return BTRACE_ERR_NONE;
    }

  internal_error (_("Unknown btrace read type."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  switch (tinfo->conf.format)
    {
    case BTRACE_FORMAT_NONE:
      return BTRACE_ERR_NOT_SUPPORTED;

    case BTRACE_FORMAT_BTS:
      /* We read btrace in BTS format.  */
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;

      return linux_read_bts (&btrace->variant.bts, tinfo, type);

    case BTRACE_FORMAT_PT:
      /* We read btrace in Intel Processor Trace format.  */
      btrace->format = BTRACE_FORMAT_PT;
      btrace->variant.pt.data = NULL;
      btrace->variant.pt.size = 0;

      return linux_read_pt (&btrace->variant.pt, tinfo, type);
    }

  internal_error (_("Unknown branch trace format."));
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return &tinfo->conf;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid, const struct btrace_config *conf)
{
  error (_("Target does not support branch tracing."));
}

/* See linux-btrace.h.  */

enum btrace_error
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

enum btrace_error
linux_read_btrace (struct btrace_data *btrace,
                   struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return BTRACE_ERR_NOT_SUPPORTED;
}

/* See linux-btrace.h.  */

const struct btrace_config *
linux_btrace_conf (const struct btrace_target_info *tinfo)
{
  return nullptr;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */