/* Linux-dependent part of branch trace support for GDB, and GDBserver.

   Copyright (C) 2013 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#ifdef GDBSERVER
#include "server.h"
#else
#include "defs.h"
#endif

#include "linux-btrace.h"
#include "common-utils.h"
#include "gdb_assert.h"
#include "regcache.h"
#include "gdbthread.h"
#include "gdb_wait.h"
#include "i386-cpuid.h"

#ifdef HAVE_SYS_SYSCALL_H
#include <sys/syscall.h>
#endif

#if HAVE_LINUX_PERF_EVENT_H && defined(SYS_perf_event_open)

#include <errno.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/user.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <signal.h>

/* A branch trace record in perf_event.  */
struct perf_event_bts
{
  /* The linear address of the branch source.  */
  uint64_t from;

  /* The linear address of the branch destination.  */
  uint64_t to;
};
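
/* This layout matches a perf_event sample configured with PERF_SAMPLE_IP
   | PERF_SAMPLE_ADDR (the configuration requested below): for BTS, the
   sampled instruction pointer is presumably the branch source and the
   sampled address the branch destination.  */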

/* A perf_event branch trace sample.  */
struct perf_event_sample
{
  /* The perf_event sample header.  */
  struct perf_event_header header;

  /* The perf_event branch tracing payload.  */
  struct perf_event_bts bts;
};

/* Get the perf_event header.  */

static inline volatile struct perf_event_mmap_page *
perf_event_header (struct btrace_target_info *tinfo)
{
  return tinfo->buffer;
}

/* Get the size of the perf_event mmap buffer.  */

static inline size_t
perf_event_mmap_size (const struct btrace_target_info *tinfo)
{
  /* The branch trace buffer is preceded by a configuration page.  */
  return (tinfo->size + 1) * PAGE_SIZE;
}

/* Get the size of the perf_event buffer.  */

static inline size_t
perf_event_buffer_size (struct btrace_target_info *tinfo)
{
  return tinfo->size * PAGE_SIZE;
}

/* Get the start address of the perf_event buffer.  */

static inline const uint8_t *
perf_event_buffer_begin (struct btrace_target_info *tinfo)
{
  return ((const uint8_t *) tinfo->buffer) + PAGE_SIZE;
}

/* Get the end address of the perf_event buffer.  */

static inline const uint8_t *
perf_event_buffer_end (struct btrace_target_info *tinfo)
{
  return perf_event_buffer_begin (tinfo) + perf_event_buffer_size (tinfo);
}

/* Check whether an address is in the kernel.  */

static inline int
perf_event_is_kernel_addr (const struct btrace_target_info *tinfo,
                           uint64_t addr)
{
  uint64_t mask;

  /* If we don't know the size of a pointer, we can't check.  Let's assume
     it's not a kernel address in this case.  */
  if (tinfo->ptr_bits == 0)
    return 0;

  /* A bit mask for the most significant bit in an address.  */
  mask = (uint64_t) 1 << (tinfo->ptr_bits - 1);
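
  /* On x86-64, for example, PTR_BITS is 64 and this mask is 1ULL << 63;
     Linux places the kernel in the upper half of the canonical address
     space, so kernel addresses have this bit set.  */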

  /* Check whether the most significant bit in the address is set.  */
  return (addr & mask) != 0;
}

/* Check whether a perf event record should be skipped.  */

static inline int
perf_event_skip_record (const struct btrace_target_info *tinfo,
                        const struct perf_event_bts *bts)
{
  /* The hardware may report branches from kernel into user space.  Branches
     from user into kernel space will be suppressed.  We filter the former to
     provide a consistent branch trace excluding kernel.  */
  return perf_event_is_kernel_addr (tinfo, bts->from);
}

/* Perform a few consistency checks on a perf event sample record.  This is
   meant to catch cases when we get out of sync with the perf event stream.  */

static inline int
perf_event_sample_ok (const struct perf_event_sample *sample)
{
  if (sample->header.type != PERF_RECORD_SAMPLE)
    return 0;

  if (sample->header.size != sizeof (*sample))
    return 0;

  return 1;
}

/* Branch trace is collected in a circular buffer [begin; end) as pairs of from
   and to addresses (plus a header).

   START points into that buffer at the next sample position.
   We read the collected samples backwards from START.

   While reading the samples, we convert the information into a list of blocks.
   For two adjacent samples s1 and s2, we form a block b such that b.begin =
   s1.to and b.end = s2.from.

   In case the buffer overflows during sampling, one sample may have its lower
   part at the end and its upper part at the beginning of the buffer.  */
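
/* For example (with illustrative addresses): traversing backwards, the
   newest sample s2 = { from = 0x120, to = 0x200 } completes the block that
   ends at the current pc and begins at 0x200; the next-older sample
   s1 = { from = 0x040, to = 0x100 } then completes the block from 0x100 to
   0x120, i.e. the code executed between those two branches.  */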

static VEC (btrace_block_s) *
perf_event_read_bts (struct btrace_target_info *tinfo, const uint8_t *begin,
                     const uint8_t *end, const uint8_t *start)
{
  VEC (btrace_block_s) *btrace = NULL;
  struct perf_event_sample sample;
  size_t read = 0, size = (end - begin);
  struct btrace_block block = { 0, 0 };
  struct regcache *regcache;

  gdb_assert (begin <= start);
  gdb_assert (start <= end);

  /* The first block ends at the current pc.  */
#ifdef GDBSERVER
  regcache = get_thread_regcache (find_thread_ptid (tinfo->ptid), 1);
#else
  regcache = get_thread_regcache (tinfo->ptid);
#endif
  block.end = regcache_read_pc (regcache);

  /* The buffer may contain a partial record as its last entry (i.e. when the
     buffer size is not a multiple of the sample size).  */
  read = sizeof (sample) - 1;
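
  /* Starting READ at sizeof (sample) - 1 makes the loop below iterate
     exactly size / sizeof (sample) times (rounding down), so we never read
     a trailing partial record.  */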

  for (; read < size; read += sizeof (sample))
    {
      const struct perf_event_sample *psample;

      /* Find the next perf_event sample in a backwards traversal.  */
      start -= sizeof (sample);

      /* If we're still inside the buffer, the sample is contiguous and we
         can read it in place.  */
      if (begin <= start)
        psample = (const struct perf_event_sample *) start;
      else
        {
          int missing;

          /* We're to the left of the ring buffer, we will wrap around and
             reappear at the very right of the ring buffer.  */

          missing = (begin - start);
          start = (end - missing);

          /* If the entire sample is missing, it sits contiguously at the
             very end of the buffer.  */
          if (missing == sizeof (sample))
            psample = (const struct perf_event_sample *) start;
          else
            {
              uint8_t *stack;

              /* The sample wrapped around.  The lower part is at the end and
                 the upper part is at the beginning of the buffer.  */
              stack = (uint8_t *) &sample;

              /* Copy the two parts so we have a contiguous sample.  */
              memcpy (stack, start, missing);
              memcpy (stack + missing, begin, sizeof (sample) - missing);

              psample = &sample;
            }
        }

      if (!perf_event_sample_ok (psample))
        {
          warning (_("Branch trace may be incomplete."));
          break;
        }

      if (perf_event_skip_record (tinfo, &psample->bts))
        continue;

      /* We found a valid sample, so we can complete the current block.  */
      block.begin = psample->bts.to;

      VEC_safe_push (btrace_block_s, btrace, &block);

      /* Start the next block.  */
      block.end = psample->bts.from;
    }

  return btrace;
}

/* Check whether the kernel supports branch tracing.  */
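
/* There is no direct way to query this, so we probe: fork a child that
   stops itself via PTRACE_TRACEME and SIGTRAP, try to open a
   branch-tracing perf event for it, and kill the child again.  */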

static int
kernel_supports_btrace (void)
{
  struct perf_event_attr attr;
  pid_t child, pid;
  int status, file;

  errno = 0;
  child = fork ();
  switch (child)
    {
    case -1:
      warning (_("test branch tracing: cannot fork: %s."), strerror (errno));
      return 0;

    case 0:
      status = ptrace (PTRACE_TRACEME, 0, NULL, NULL);
      if (status != 0)
        {
          warning (_("test branch tracing: cannot PTRACE_TRACEME: %s."),
                   strerror (errno));
          _exit (1);
        }

      status = raise (SIGTRAP);
      if (status != 0)
        {
          warning (_("test branch tracing: cannot raise SIGTRAP: %s."),
                   strerror (errno));
          _exit (1);
        }

      _exit (1);

    default:
      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test branch tracing: bad pid %ld, error: %s."),
                   (long) pid, strerror (errno));
          return 0;
        }

      if (!WIFSTOPPED (status))
        {
          warning (_("test branch tracing: expected stop. status: %d."),
                   status);
          return 0;
        }
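
      /* Try to open a branch-tracing perf event for the stopped child,
         using the same configuration that linux_enable_btrace uses for
         actual tracing.  */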
      memset (&attr, 0, sizeof (attr));

      attr.type = PERF_TYPE_HARDWARE;
      attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
      attr.sample_period = 1;
      attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;
      attr.exclude_kernel = 1;
      attr.exclude_hv = 1;
      attr.exclude_idle = 1;

      file = syscall (SYS_perf_event_open, &attr, child, -1, -1, 0);
      if (file >= 0)
        close (file);

      kill (child, SIGKILL);
      ptrace (PTRACE_KILL, child, NULL, NULL);

      pid = waitpid (child, &status, 0);
      if (pid != child)
        {
          warning (_("test branch tracing: bad pid %ld, error: %s."),
                   (long) pid, strerror (errno));
          if (!WIFSIGNALED (status))
            warning (_("test branch tracing: expected killed. status: %d."),
                     status);
        }

      return (file >= 0);
    }
}

/* Check whether an Intel cpu supports branch tracing.  */

static int
intel_supports_btrace (void)
{
  unsigned int cpuid, model, family;

  if (!i386_cpuid (1, &cpuid, NULL, NULL, NULL))
    return 0;
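
  /* CPUID leaf 1 reports the family in bits 8-11 of EAX and the model in
     bits 4-7; for family 0x6, the extended model in bits 16-19 supplies the
     model's high nibble (see below).  */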
  family = (cpuid >> 8) & 0xf;
  model = (cpuid >> 4) & 0xf;

  switch (family)
    {
    case 0x6:
      model += (cpuid >> 12) & 0xf0;

      switch (model)
        {
        case 0x1a: /* Nehalem */
        case 0x1f:
        case 0x1e:
        case 0x2e:
        case 0x25: /* Westmere */
        case 0x2c:
        case 0x2f:
        case 0x2a: /* Sandy Bridge */
        case 0x2d:
        case 0x3a: /* Ivy Bridge */

          /* AAJ122: LBR, BTM, or BTS records may have incorrect branch
             "from" information after an EIST transition, T-states, C1E, or
             Adaptive Thermal Throttling.  */
          return 0;
        }
    }

  return 1;
}

/* Check whether the cpu supports branch tracing.  */

static int
cpu_supports_btrace (void)
{
  unsigned int ebx, ecx, edx;

  if (!i386_cpuid (0, NULL, &ebx, &ecx, &edx))
    return 0;

  if (ebx == signature_INTEL_ebx && ecx == signature_INTEL_ecx
      && edx == signature_INTEL_edx)
    return intel_supports_btrace ();

  /* Don't know about others.  Let's assume they do.  */
  return 1;
}

/* See linux-btrace.h.  */

int
linux_supports_btrace (void)
{
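  /* Cache the result: zero means we have not checked yet, negative means
     branch tracing is not supported, positive means it is.  */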
  static int cached;

  if (cached == 0)
    {
      if (!kernel_supports_btrace ())
        cached = -1;
      else if (!cpu_supports_btrace ())
        cached = -1;
      else
        cached = 1;
    }

  return cached > 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid)
{
  struct btrace_target_info *tinfo;
  int pid;

  tinfo = xzalloc (sizeof (*tinfo));
  tinfo->ptid = ptid;

  tinfo->attr.size = sizeof (tinfo->attr);
  tinfo->attr.type = PERF_TYPE_HARDWARE;
  tinfo->attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
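
  /* A sample period of one requests a sample for every branch instruction,
     i.e. the complete branch trace.  */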
  tinfo->attr.sample_period = 1;

  /* We sample the from and to addresses.  */
  tinfo->attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_ADDR;

  tinfo->attr.exclude_kernel = 1;
  tinfo->attr.exclude_hv = 1;
  tinfo->attr.exclude_idle = 1;

  tinfo->ptr_bits = 0;

  pid = ptid_get_lwp (ptid);
  if (pid == 0)
    pid = ptid_get_pid (ptid);

  errno = 0;
  tinfo->file = syscall (SYS_perf_event_open, &tinfo->attr, pid, -1, -1, 0);
  if (tinfo->file < 0)
    goto err;

  /* We hard-code the trace buffer size.
     At some later time, we should make this configurable.  */
  tinfo->size = 1;
  tinfo->buffer = mmap (NULL, perf_event_mmap_size (tinfo),
                        PROT_READ, MAP_SHARED, tinfo->file, 0);
  if (tinfo->buffer == MAP_FAILED)
    goto err_file;

  return tinfo;

 err_file:
  close (tinfo->file);

 err:
  xfree (tinfo);
  return NULL;
}

/* See linux-btrace.h.  */

int
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  int errcode;

  errno = 0;
  errcode = munmap (tinfo->buffer, perf_event_mmap_size (tinfo));
  if (errcode != 0)
    return errno;

  close (tinfo->file);
  xfree (tinfo);

  return 0;
}

/* Check whether the branch trace has changed.  */

static int
linux_btrace_has_changed (struct btrace_target_info *tinfo)
{
  volatile struct perf_event_mmap_page *header = perf_event_header (tinfo);

  return header->data_head != tinfo->data_head;
}

/* See linux-btrace.h.  */

VEC (btrace_block_s) *
linux_read_btrace (struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  VEC (btrace_block_s) *btrace = NULL;
  volatile struct perf_event_mmap_page *header;
  const uint8_t *begin, *end, *start;
  unsigned long data_head, retries = 5;
  size_t buffer_size;

  if (type == btrace_read_new && !linux_btrace_has_changed (tinfo))
    return NULL;

  header = perf_event_header (tinfo);
  buffer_size = perf_event_buffer_size (tinfo);

  /* We may need to retry reading the trace.  See below.  */
  while (retries--)
    {
      data_head = header->data_head;

      /* If there's new trace, let's read it.  */
      if (data_head != tinfo->data_head)
        {
          /* DATA_HEAD keeps growing; the buffer itself is circular.  */
          begin = perf_event_buffer_begin (tinfo);
          start = begin + data_head % buffer_size;
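
          /* If we have not yet filled the buffer once, the valid data ends
             at START; otherwise, older data has been overwritten and the
             valid region extends to the end of the buffer.  */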
          if (data_head <= buffer_size)
            end = start;
          else
            end = perf_event_buffer_end (tinfo);

          btrace = perf_event_read_bts (tinfo, begin, end, start);
        }

      /* The stopping thread notifies its ptracer before it is scheduled out.
         On multi-core systems, the debugger might therefore run while the
         kernel is still writing the last branch trace records.

         Let's check whether the data head moved while we read the trace.  */
      if (data_head == header->data_head)
        break;
    }

  tinfo->data_head = data_head;

  return btrace;
}

#else /* !HAVE_LINUX_PERF_EVENT_H */

/* See linux-btrace.h.  */

int
linux_supports_btrace (void)
{
  return 0;
}

/* See linux-btrace.h.  */

struct btrace_target_info *
linux_enable_btrace (ptid_t ptid)
{
  return NULL;
}

/* See linux-btrace.h.  */

int
linux_disable_btrace (struct btrace_target_info *tinfo)
{
  return ENOSYS;
}

/* See linux-btrace.h.  */

VEC (btrace_block_s) *
linux_read_btrace (struct btrace_target_info *tinfo,
                   enum btrace_read_type type)
{
  return NULL;
}

#endif /* !HAVE_LINUX_PERF_EVENT_H */