/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>
#include <algorithm>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

/* A vector of function segments.  */
typedef struct btrace_function *bfun_s;
DEF_VEC_P (bfun_s);

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[btrace] " msg "\n", ##args);              \
    }                                                                   \
  while (0)
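
/* For example, without the do/while wrapper an invocation such as

     if (record_debug != 0)
       DEBUG ("msg");
     else
       ...;

   would expand into a bare block followed by a semicolon, detaching the
   `else' from its `if'.  The wrapper makes the expansion a single
   statement.  */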

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}
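
/* With the above, a typical debug line (illustrative values) reads

     [btrace] [ftrace] new call: fun = main, file = main.c, level = 0, insn = [1; 5)

   where "[1; 5)" is the half-open range of instruction numbers covered by
   the segment.  */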

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return VEC_length (btrace_insn_s, bfun->insn);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;

  if (btinfo->functions.empty ())
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      struct btrace_function *prev = btinfo->functions.back ();

      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;
      bfun->flow.prev = prev;

      bfun->number = prev->number + 1;
      bfun->insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
      bfun->level = prev->level;
    }

  btinfo->functions.push_back (bfun);
  return bfun;
}
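
/* For example, if the first segment contains three instructions, it covers
   instruction numbers [1; 4) and a segment created right after it gets
   INSN_OFFSET 4.  A gap segment counts as a single instruction, per
   ftrace_call_num_insn above.  */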

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  if (length != 0)
    bfun->up = btinfo->functions[length - 1];
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  if (length != 0)
    bfun->up = btinfo->functions[length - 1];
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return bfun->up;

  return NULL;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev = btinfo->functions.back ();
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost function and add a new caller for it.
             This should handle a series of initial tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          bfun->level = prev->level - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned but didn't.  Let's start a new, separate back trace
             from PREV's level.  */
          bfun->level = prev->level - 1;

          /* We fix up the back trace for PREV but leave other function segments
             on the same level as they are.
             This should handle things like schedule () correctly where we're
             switching contexts.  */
          prev->up = bfun;
          prev->flags = BFUN_UP_LINKS_TO_RET;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *prev = btinfo->functions.back ();
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = btinfo->functions.back ();
      if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
        bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            return ftrace_new_return (btinfo, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (btinfo, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* A jump to the start of a function is (typically) a tail call.  */
            if (start == pc)
              return ftrace_new_tailcall (btinfo, mfun, fun);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call if we're switching functions
               and as an intra-function branch if we don't.  */
            if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
              return ftrace_new_tailcall (btinfo, mfun, fun);

            break;
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}

/* Add the instruction INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  */

static int
ftrace_match_backtrace (struct btrace_function *lhs,
                        struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
        return 0;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);
    }

  return matches;
}
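
/* For example, if both LHS and RHS have the back trace bar -> foo -> main,
   the above returns three; if any pair of corresponding callers differs in
   its symbol information, it returns zero, discarding the partial match.  */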

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.  */

static void
ftrace_fixup_level (struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  for (; bfun != NULL; bfun = bfun->flow.next)
    bfun->level += adjustment;
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size () - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i]->level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = btinfo->functions.back ();
  if (VEC_length (btrace_insn_s, last->insn) != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
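
/* For example, for a trace whose segments have levels -2, -1, and 0, the
   offset becomes 2, so that the levels can be presented starting at
   zero.  */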

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  */

static void
ftrace_connect_bfun (struct btrace_function *prev,
                     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->segment.next == NULL);
  gdb_assert (next->segment.prev == NULL);

  prev->segment.next = next;
  next->segment.prev = prev;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == NULL)
    {
      if (next->up != NULL)
        {
          DEBUG_FTRACE ("using next's callers");
          ftrace_fixup_caller (prev, next->up, next->flags);
        }
    }
  else if (next->up == NULL)
    {
      if (prev->up != NULL)
        {
          DEBUG_FTRACE ("using prev's callers");
          ftrace_fixup_caller (next, prev->up, prev->flags);
        }
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
         link to add the tail callers to NEXT's back trace.

         This removes NEXT->UP from NEXT's back trace.  It will be added back
         when connecting NEXT and PREV's callers - provided they exist.

         If PREV's back trace consists of a series of tail calls without an
         actual call, there will be no further connection and NEXT's caller will
         be removed for good.  To catch this case, we handle it here and connect
         the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        {
          struct btrace_function *caller;
          btrace_function_flags flags;

          /* We checked NEXT->UP above so CALLER can't be NULL.  */
          caller = next->up;
          flags = next->flags;

          DEBUG_FTRACE ("adding prev's tail calls to next");

          ftrace_fixup_caller (next, prev->up, prev->flags);

          for (prev = prev->up; prev != NULL; prev = prev->up)
            {
              /* At the end of PREV's back trace, continue with CALLER.  */
              if (prev->up == NULL)
                {
                  DEBUG_FTRACE ("fixing up link for tailcall chain");
                  ftrace_debug (prev, "..top");
                  ftrace_debug (caller, "..up");

                  ftrace_fixup_caller (prev, caller, flags);

                  /* If we skipped any tail calls, this may move CALLER to a
                     different function level.

                     Note that changing CALLER's level is only OK because we
                     know that this is the last iteration of the bottom-to-top
                     walk in ftrace_connect_backtrace.

                     Otherwise we will fix up CALLER's level when we connect it
                     to PREV's caller in the next iteration.  */
                  ftrace_fixup_level (caller, prev->level - caller->level - 1);
                  break;
                }

              /* There's nothing to do if we find a real call.  */
              if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
                {
                  DEBUG_FTRACE ("will fix up link in next iteration");
                  break;
                }
            }
        }
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  */

static void
ftrace_connect_backtrace (struct btrace_function *lhs,
                          struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (lhs);
      rhs = ftrace_get_caller (rhs);

      ftrace_connect_bfun (prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_function *lhs, struct btrace_function *rhs,
                   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
                rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL; cand_l = ftrace_get_caller (cand_l))
    for (cand_r = rhs; cand_r != NULL; cand_r = ftrace_get_caller (cand_r))
      {
        int matches;

        matches = ftrace_match_backtrace (cand_l, cand_r);
        if (best_matches < matches)
          {
            best_matches = matches;
            best_l = cand_l;
            best_r = cand_r;
          }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (best_l, best_r);

  return best_matches;
}

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  VEC (bfun_s) *remaining;
  struct cleanup *old_chain;
  int min_matches;

  DEBUG ("bridge gaps");

  remaining = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &remaining);

  /* We require a minimum amount of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
         skip a gap and revisit it again after we closed later gaps.  */
      while (!VEC_empty (bfun_s, *gaps))
        {
          struct btrace_function *gap;
          unsigned int idx;

          for (idx = 0; VEC_iterate (bfun_s, *gaps, idx, gap); ++idx)
            {
              struct btrace_function *lhs, *rhs;
              int bridged;

              /* We may have a sequence of gaps if we run from one error into
                 the next as we try to re-sync onto the trace stream.  Ignore
                 all but the leftmost gap in such a sequence.

                 Also ignore gaps at the beginning of the trace.  */
              lhs = gap->flow.prev;
              if (lhs == NULL || lhs->errcode != 0)
                continue;

              /* Skip gaps to the right.  */
              for (rhs = gap->flow.next; rhs != NULL; rhs = rhs->flow.next)
                if (rhs->errcode == 0)
                  break;

              /* Ignore gaps at the end of the trace.  */
              if (rhs == NULL)
                continue;

              bridged = ftrace_bridge_gap (lhs, rhs, min_matches);

              /* Keep track of gaps we were not able to bridge and try again.
                 If we just pushed them to the end of GAPS we would risk an
                 infinite loop in case we simply cannot bridge a gap.  */
              if (bridged == 0)
                VEC_safe_push (bfun_s, remaining, gap);
            }

          /* Let's see if we made any progress.  */
          if (VEC_length (bfun_s, remaining) == VEC_length (bfun_s, *gaps))
            break;

          VEC_free (bfun_s, *gaps);

          *gaps = remaining;
          remaining = NULL;
        }

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (VEC_empty (bfun_s, *gaps))
        break;

      VEC_free (bfun_s, remaining);
    }

  do_cleanups (old_chain);

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (&tp->btrace);
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace,
                           VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct gdbarch *gdbarch;
  unsigned int blk;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          struct btrace_function *bfun;
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace.  */
              bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW);

              VEC_safe_push (bfun_s, *gaps, bfun);

              warning (_("Recorded trace may be corrupted at instruction "
                         "%u (pc = %s)."), bfun->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          bfun = ftrace_update_function (btinfo, pc);

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = std::min (level, bfun->level);

          size = 0;
          TRY
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          CATCH (error, RETURN_MASK_ERROR)
            {
            }
          END_CATCH

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (bfun, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE);

              VEC_safe_push (bfun_s, *gaps, bfun);

              warning (_("Recorded trace may be incomplete at instruction %u "
                         "(pc = %s)."), bfun->insn_offset - 1,
                       core_addr_to_string_nz (pc));

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = std::min (level, bfun->level);
        }
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

/* Translate an Intel Processor Trace instruction class to a btrace
   instruction class.  */

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
          pt_reclassify_insn (insn.iclass),
          pt_btrace_insn_flags (insn)};
}


/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
               struct pt_insn_decoder *decoder,
               int *plevel,
               VEC (bfun_s) **gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int errcode;

  for (;;)
    {
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        {
          if (errcode != -pte_eos)
            warning (_("Failed to synchronize onto the Intel Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
          break;
        }

      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof (insn));
          if (errcode < 0)
            break;

          /* Look for gaps in the trace - unless we're at the beginning.  */
          if (!btinfo->functions.empty ())
            {
              /* Tracing is disabled and re-enabled each time we enter the
                 kernel.  Most times, we continue from the same instruction we
                 stopped before.  This is indicated via the RESUMED instruction
                 flag.  The ENABLED instruction flag means that we continued
                 from some other instruction.  Indicate this as a trace gap.  */
              if (insn.enabled)
                {
                  bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED);

                  VEC_safe_push (bfun_s, *gaps, bfun);

                  pt_insn_get_offset (decoder, &offset);

                  warning (_("Non-contiguous trace at instruction %u (offset "
                             "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
                           bfun->insn_offset - 1, offset, insn.ip);
                }
            }

          /* Indicate trace overflows.  */
          if (insn.resynced)
            {
              bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW);

              VEC_safe_push (bfun_s, *gaps, bfun);

              pt_insn_get_offset (decoder, &offset);

              warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
                         ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1,
                       offset, insn.ip);
            }

          bfun = ftrace_update_function (btinfo, insn.ip);

          /* Maintain the function level offset.  */
          *plevel = std::min (*plevel, bfun->level);

          btrace_insn btinsn = pt_btrace_insn (insn);
          ftrace_update_insns (bfun, &btinsn);
        }

      if (errcode == -pte_eos)
        break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, errcode);

      VEC_safe_push (bfun_s, *gaps, bfun);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
                 ", pc = 0x%" PRIx64 "): %s."), errcode, bfun->insn_offset - 1,
               offset, insn.ip, pt_errstr (pt_errcode (errcode)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                           struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          VEC (bfun_s) **gaps)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
                                       NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
        {
          struct btrace_function *bfun;

          bfun = ftrace_new_gap (btinfo, BDE_PT_USER_QUIT);

          VEC_safe_push (bfun_s, *gaps, bfun);
        }

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace,
                          VEC (bfun_s) **gaps)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a branch trace BTRACE for the
   thread TP.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
                         VEC (bfun_s) **gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Finalize the function branch trace computation for TP, bridging any
   gaps recorded in GAPS.  */

static void
btrace_finalize_ftrace (struct thread_info *tp, VEC (bfun_s) **gaps)
{
  if (!VEC_empty (bfun_s, *gaps))
    {
      tp->btrace.ngaps += VEC_length (bfun_s, *gaps);
      btrace_bridge_gaps (tp, gaps);
    }
}

/* Compute the function branch trace for TP from BTRACE.  */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  VEC (bfun_s) *gaps;
  struct cleanup *old_chain;

  gaps = NULL;
  old_chain = make_cleanup (VEC_cleanup (bfun_s), &gaps);

  TRY
    {
      btrace_compute_ftrace_1 (tp, btrace, &gaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      btrace_finalize_ftrace (tp, &gaps);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace (tp, &gaps);

  do_cleanups (old_chain);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* We're done if we failed to enable tracing.  */
  if (tp->btrace.target == NULL)
    return;

  /* We need to undo the enable in case of errors.  */
  TRY
    {
      /* Add an entry for the current PC so we start tracing from where we
         enabled it.

         If we can't access TP's registers, TP is most likely running.  In this
         case, we can't really say where tracing was enabled so it should be
         safe to simply skip this step.

         This is not relevant for BTRACE_FORMAT_PT since the trace will already
         start at the PC at which tracing was enabled.  */
      if (conf->format != BTRACE_FORMAT_PT
          && can_access_registers_ptid (tp->ptid))
        btrace_add_pc (tp);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      btrace_disable (tp);

      throw_exception (exception);
    }
  END_CATCH
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  gdb_assert (!btinfo->functions.empty ());
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  last_bfun = btinfo->functions.back ();

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
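
/* For example (illustrative addresses): if the last instruction of the old
   trace is at pc 0x400500 and the chronologically first block of the delta
   trace is reported as [0, 0x400520], the above turns that block into
   [0x400500, 0x400520] and pops the instruction at 0x400500 so it is
   decoded again as part of the new trace.  */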
1665
1666 /* Adjust the block trace in order to stitch old and new trace together.
1667 BTRACE is the new delta trace between the last and the current stop.
1668 TP is the traced thread.
1669 May modifx BTRACE as well as the existing trace in TP.
1670 Return 0 on success, -1 otherwise. */
1671
1672 static int
1673 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
1674 {
1675 /* If we don't have trace, there's nothing to do. */
1676 if (btrace_data_empty (btrace))
1677 return 0;
1678
1679 switch (btrace->format)
1680 {
1681 case BTRACE_FORMAT_NONE:
1682 return 0;
1683
1684 case BTRACE_FORMAT_BTS:
1685 return btrace_stitch_bts (&btrace->variant.bts, tp);
1686
1687 case BTRACE_FORMAT_PT:
1688 /* Delta reads are not supported. */
1689 return -1;
1690 }
1691
1692 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1693 }
1694
1695 /* Clear the branch trace histories in BTINFO. */
1696
1697 static void
1698 btrace_clear_history (struct btrace_thread_info *btinfo)
1699 {
1700 xfree (btinfo->insn_history);
1701 xfree (btinfo->call_history);
1702 xfree (btinfo->replay);
1703
1704 btinfo->insn_history = NULL;
1705 btinfo->call_history = NULL;
1706 btinfo->replay = NULL;
1707 }
1708
1709 /* Clear the branch trace maintenance histories in BTINFO. */
1710
1711 static void
1712 btrace_maint_clear (struct btrace_thread_info *btinfo)
1713 {
1714 switch (btinfo->data.format)
1715 {
1716 default:
1717 break;
1718
1719 case BTRACE_FORMAT_BTS:
1720 btinfo->maint.variant.bts.packet_history.begin = 0;
1721 btinfo->maint.variant.bts.packet_history.end = 0;
1722 break;
1723
1724 #if defined (HAVE_LIBIPT)
1725 case BTRACE_FORMAT_PT:
1726 xfree (btinfo->maint.variant.pt.packets);
1727
1728 btinfo->maint.variant.pt.packets = NULL;
1729 btinfo->maint.variant.pt.packet_history.begin = 0;
1730 btinfo->maint.variant.pt.packet_history.end = 0;
1731 break;
1732 #endif /* defined (HAVE_LIBIPT) */
1733 }
1734 }
1735
1736 /* See btrace.h. */
1737
1738 const char *
1739 btrace_decode_error (enum btrace_format format, int errcode)
1740 {
1741 switch (format)
1742 {
1743 case BTRACE_FORMAT_BTS:
1744 switch (errcode)
1745 {
1746 case BDE_BTS_OVERFLOW:
1747 return _("instruction overflow");
1748
1749 case BDE_BTS_INSN_SIZE:
1750 return _("unknown instruction");
1751
1752 default:
1753 break;
1754 }
1755 break;
1756
1757 #if defined (HAVE_LIBIPT)
1758 case BTRACE_FORMAT_PT:
1759 switch (errcode)
1760 {
1761 case BDE_PT_USER_QUIT:
1762 return _("trace decode cancelled");
1763
1764 case BDE_PT_DISABLED:
1765 return _("disabled");
1766
1767 case BDE_PT_OVERFLOW:
1768 return _("overflow");
1769
1770 default:
1771 if (errcode < 0)
1772 return pt_errstr (pt_errcode (errcode));
1773 break;
1774 }
1775 break;
1776 #endif /* defined (HAVE_LIBIPT) */
1777
1778 default:
1779 break;
1780 }
1781
1782 return _("unknown");
1783 }
1784
1785 /* See btrace.h. */
1786
1787 void
1788 btrace_fetch (struct thread_info *tp)
1789 {
1790 struct btrace_thread_info *btinfo;
1791 struct btrace_target_info *tinfo;
1792 struct btrace_data btrace;
1793 struct cleanup *cleanup;
1794 int errcode;
1795
1796 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
1797 target_pid_to_str (tp->ptid));
1798
1799 btinfo = &tp->btrace;
1800 tinfo = btinfo->target;
1801 if (tinfo == NULL)
1802 return;
1803
1804 /* There's no way we could get new trace while replaying.
1805 On the other hand, delta trace would return a partial record with the
1806 current PC, which is the replay PC, not the last PC, as expected. */
1807 if (btinfo->replay != NULL)
1808 return;
1809
1810 /* With CLI usage, TP->PTID always equals INFERIOR_PTID here. Now that we
1811 can store a gdb.Record object in Python referring to a different thread
1812 than the current one, temporarily set INFERIOR_PTID. */
1813 cleanup = save_inferior_ptid ();
1814 inferior_ptid = tp->ptid;
1815
1816 /* We should not be called on running or exited threads. */
1817 gdb_assert (can_access_registers_ptid (tp->ptid));
1818
1819 btrace_data_init (&btrace);
1820 make_cleanup_btrace_data (&btrace);
1821
1822 /* Let's first try to extend the trace we already have. */
1823 if (!btinfo->functions.empty ())
1824 {
1825 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1826 if (errcode == 0)
1827 {
1828 /* Success. Let's try to stitch the traces together. */
1829 errcode = btrace_stitch_trace (&btrace, tp);
1830 }
1831 else
1832 {
1833 /* We failed to read delta trace. Let's try to read new trace. */
1834 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1835
1836 /* If we got any new trace, discard what we have. */
1837 if (errcode == 0 && !btrace_data_empty (&btrace))
1838 btrace_clear (tp);
1839 }
1840
1841 /* If we were not able to read the trace, we start over. */
1842 if (errcode != 0)
1843 {
1844 btrace_clear (tp);
1845 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1846 }
1847 }
1848 else
1849 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1850
1851 /* If we were not able to read the branch trace, signal an error. */
1852 if (errcode != 0)
1853 error (_("Failed to read branch trace."));
1854
1855 /* Compute the trace, provided we have any. */
1856 if (!btrace_data_empty (&btrace))
1857 {
1858 /* Store the raw trace data. The stored data will be cleared in
1859 btrace_clear, so we always append the new trace. */
1860 btrace_data_append (&btinfo->data, &btrace);
1861 btrace_maint_clear (btinfo);
1862
1863 btrace_clear_history (btinfo);
1864 btrace_compute_ftrace (tp, &btrace);
1865 }
1866
1867 do_cleanups (cleanup);
1868 }
1869
1870 /* See btrace.h. */
1871
1872 void
1873 btrace_clear (struct thread_info *tp)
1874 {
1875 struct btrace_thread_info *btinfo;
1876
1877 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
1878 target_pid_to_str (tp->ptid));
1879
1880 /* Make sure btrace frames that may hold a pointer into the branch
1881 trace data are destroyed. */
1882 reinit_frame_cache ();
1883
1884 btinfo = &tp->btrace;
1885 for (auto &bfun : btinfo->functions)
1886 {
1887 VEC_free (btrace_insn_s, bfun->insn);
1888 xfree (bfun);
1889 }
1890
1891 btinfo->functions.clear ();
1892 btinfo->ngaps = 0;
1893
1894 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1895 btrace_maint_clear (btinfo);
1896 btrace_data_clear (&btinfo->data);
1897 btrace_clear_history (btinfo);
1898 }
1899
1900 /* See btrace.h. */
1901
1902 void
1903 btrace_free_objfile (struct objfile *objfile)
1904 {
1905 struct thread_info *tp;
1906
1907 DEBUG ("free objfile");
1908
1909 ALL_NON_EXITED_THREADS (tp)
1910 btrace_clear (tp);
1911 }
1912
1913 #if defined (HAVE_LIBEXPAT)
1914
1915 /* Check the btrace document version. */
1916
1917 static void
1918 check_xml_btrace_version (struct gdb_xml_parser *parser,
1919 const struct gdb_xml_element *element,
1920 void *user_data, VEC (gdb_xml_value_s) *attributes)
1921 {
1922 const char *version
1923 = (const char *) xml_find_attribute (attributes, "version")->value;
1924
1925 if (strcmp (version, "1.0") != 0)
1926 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1927 }
1928
1929 /* Parse a btrace "block" xml record. */
1930
1931 static void
1932 parse_xml_btrace_block (struct gdb_xml_parser *parser,
1933 const struct gdb_xml_element *element,
1934 void *user_data, VEC (gdb_xml_value_s) *attributes)
1935 {
1936 struct btrace_data *btrace;
1937 struct btrace_block *block;
1938 ULONGEST *begin, *end;
1939
1940 btrace = (struct btrace_data *) user_data;
1941
1942 switch (btrace->format)
1943 {
1944 case BTRACE_FORMAT_BTS:
1945 break;
1946
1947 case BTRACE_FORMAT_NONE:
1948 btrace->format = BTRACE_FORMAT_BTS;
1949 btrace->variant.bts.blocks = NULL;
1950 break;
1951
1952 default:
1953 gdb_xml_error (parser, _("Btrace format error."));
1954 }
1955
1956 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
1957 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
1958
1959 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
1960 block->begin = *begin;
1961 block->end = *end;
1962 }
1963
1964 /* Parse a "raw" xml record. */
1965
1966 static void
1967 parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
1968 gdb_byte **pdata, size_t *psize)
1969 {
1970 struct cleanup *cleanup;
1971 gdb_byte *data, *bin;
1972 size_t len, size;
1973
1974 len = strlen (body_text);
1975 if (len % 2 != 0)
1976 gdb_xml_error (parser, _("Bad raw data size."));
1977
1978 size = len / 2;
1979
1980 bin = data = (gdb_byte *) xmalloc (size);
1981 cleanup = make_cleanup (xfree, data);
1982
1983 /* We use hex encoding - see common/rsp-low.h. */
1984 while (len > 0)
1985 {
1986 char hi, lo;
1987
1988 hi = *body_text++;
1989 lo = *body_text++;
1990
1991 if (hi == 0 || lo == 0)
1992 gdb_xml_error (parser, _("Bad hex encoding."));
1993
1994 *bin++ = fromhex (hi) * 16 + fromhex (lo);
1995 len -= 2;
1996 }
1997
1998 discard_cleanups (cleanup);
1999
2000 *pdata = data;
2001 *psize = size;
2002 }
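
/* For illustration, a self-contained sketch of the same decoding.  The
   hex_digit helper below is an illustrative stand-in for fromhex from
   common/rsp-low.h; it is not GDB's implementation.  */
#if 0
#include <stdio.h>
#include <string.h>

static int
hex_digit (char c)
{
  if (c >= '0' && c <= '9')
    return c - '0';
  if (c >= 'a' && c <= 'f')
    return c - 'a' + 10;
  if (c >= 'A' && c <= 'F')
    return c - 'A' + 10;
  return -1;
}

int
main (void)
{
  const char *text = "02824202";  /* Two hex digits per byte.  */
  unsigned char buf[32];
  size_t size, i;

  /* An odd-length string would be malformed, just as above.  */
  if (strlen (text) % 2 != 0)
    return 1;

  for (size = 0; text[0] != '\0'; text += 2)
    buf[size++] = (unsigned char) (hex_digit (text[0]) * 16
                                   + hex_digit (text[1]));

  for (i = 0; i < size; ++i)
    printf ("%02x ", buf[i]);  /* Prints: 02 82 42 02.  */
  printf ("\n");

  return 0;
}
#endif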
2003
2004 /* Parse a btrace pt-config "cpu" xml record. */
2005
2006 static void
2007 parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2008 const struct gdb_xml_element *element,
2009 void *user_data,
2010 VEC (gdb_xml_value_s) *attributes)
2011 {
2012 struct btrace_data *btrace;
2013 const char *vendor;
2014 ULONGEST *family, *model, *stepping;
2015
2016 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2017 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2018 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2019 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
2020
2021 btrace = (struct btrace_data *) user_data;
2022
2023 if (strcmp (vendor, "GenuineIntel") == 0)
2024 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2025
2026 btrace->variant.pt.config.cpu.family = *family;
2027 btrace->variant.pt.config.cpu.model = *model;
2028 btrace->variant.pt.config.cpu.stepping = *stepping;
2029 }
2030
2031 /* Parse a btrace pt "raw" xml record. */
2032
2033 static void
2034 parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2035 const struct gdb_xml_element *element,
2036 void *user_data, const char *body_text)
2037 {
2038 struct btrace_data *btrace;
2039
2040 btrace = (struct btrace_data *) user_data;
2041 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2042 &btrace->variant.pt.size);
2043 }
2044
2045 /* Parse a btrace "pt" xml record. */
2046
2047 static void
2048 parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2049 const struct gdb_xml_element *element,
2050 void *user_data, VEC (gdb_xml_value_s) *attributes)
2051 {
2052 struct btrace_data *btrace;
2053
2054 btrace = (struct btrace_data *) user_data;
2055 btrace->format = BTRACE_FORMAT_PT;
2056 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2057 btrace->variant.pt.data = NULL;
2058 btrace->variant.pt.size = 0;
2059 }
2060
2061 static const struct gdb_xml_attribute block_attributes[] = {
2062 { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2063 { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2064 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2065 };
2066
2067 static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
2068 { "vendor", GDB_XML_AF_NONE, NULL, NULL },
2069 { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2070 { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2071 { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
2072 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2073 };
2074
2075 static const struct gdb_xml_element btrace_pt_config_children[] = {
2076 { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
2077 parse_xml_btrace_pt_config_cpu, NULL },
2078 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2079 };
2080
2081 static const struct gdb_xml_element btrace_pt_children[] = {
2082 { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
2083 NULL },
2084 { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
2085 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2086 };
2087
2088 static const struct gdb_xml_attribute btrace_attributes[] = {
2089 { "version", GDB_XML_AF_NONE, NULL, NULL },
2090 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2091 };
2092
2093 static const struct gdb_xml_element btrace_children[] = {
2094 { "block", block_attributes, NULL,
2095 GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
2096 { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
2097 NULL },
2098 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2099 };
2100
2101 static const struct gdb_xml_element btrace_elements[] = {
2102 { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
2103 check_xml_btrace_version, NULL },
2104 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2105 };
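
/* For reference, documents accepted by the tables above look like the
   following hand-written samples (addresses, CPU identifiers, and the
   raw payload are illustrative; the payload is elided).  A BTS document
   carries a list of blocks:

     <btrace version="1.0">
       <block begin="0x401000" end="0x401013"/>
       <block begin="0x401020" end="0x401028"/>
     </btrace>

   A PT document carries an optional configuration and the raw,
   hex-encoded trace:

     <btrace version="1.0">
       <pt>
         <pt-config>
           <cpu vendor="GenuineIntel" family="6" model="61" stepping="4"/>
         </pt-config>
         <raw>02824202...</raw>
       </pt>
     </btrace>

   Any version other than "1.0" is rejected by check_xml_btrace_version.  */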
2106
2107 #endif /* defined (HAVE_LIBEXPAT) */
2108
2109 /* See btrace.h. */
2110
2111 void
2112 parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
2113 {
2114 struct cleanup *cleanup;
2115 int errcode;
2116
2117 #if defined (HAVE_LIBEXPAT)
2118
2119 btrace->format = BTRACE_FORMAT_NONE;
2120
2121 cleanup = make_cleanup_btrace_data (btrace);
2122 errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
2123 buffer, btrace);
2124 if (errcode != 0)
2125 error (_("Error parsing branch trace."));
2126
2127 /* Keep parse results. */
2128 discard_cleanups (cleanup);
2129
2130 #else /* !defined (HAVE_LIBEXPAT) */
2131
2132 error (_("Cannot process branch trace. XML parsing is not supported."));
2133
2134 #endif /* !defined (HAVE_LIBEXPAT) */
2135 }
2136
2137 #if defined (HAVE_LIBEXPAT)
2138
2139 /* Parse a btrace-conf "bts" xml record. */
2140
2141 static void
2142 parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2143 const struct gdb_xml_element *element,
2144 void *user_data, VEC (gdb_xml_value_s) *attributes)
2145 {
2146 struct btrace_config *conf;
2147 struct gdb_xml_value *size;
2148
2149 conf = (struct btrace_config *) user_data;
2150 conf->format = BTRACE_FORMAT_BTS;
2151 conf->bts.size = 0;
2152
2153 size = xml_find_attribute (attributes, "size");
2154 if (size != NULL)
2155 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
2156 }
2157
2158 /* Parse a btrace-conf "pt" xml record. */
2159
2160 static void
2161 parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2162 const struct gdb_xml_element *element,
2163 void *user_data, VEC (gdb_xml_value_s) *attributes)
2164 {
2165 struct btrace_config *conf;
2166 struct gdb_xml_value *size;
2167
2168 conf = (struct btrace_config *) user_data;
2169 conf->format = BTRACE_FORMAT_PT;
2170 conf->pt.size = 0;
2171
2172 size = xml_find_attribute (attributes, "size");
2173 if (size != NULL)
2174 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2175 }
2176
2177 static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
2178 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2179 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2180 };
2181
2182 static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
2183 { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
2184 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2185 };
2186
2187 static const struct gdb_xml_element btrace_conf_children[] = {
2188 { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
2189 parse_xml_btrace_conf_bts, NULL },
2190 { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
2191 parse_xml_btrace_conf_pt, NULL },
2192 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2193 };
2194
2195 static const struct gdb_xml_attribute btrace_conf_attributes[] = {
2196 { "version", GDB_XML_AF_NONE, NULL, NULL },
2197 { NULL, GDB_XML_AF_NONE, NULL, NULL }
2198 };
2199
2200 static const struct gdb_xml_element btrace_conf_elements[] = {
2201 { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
2202 GDB_XML_EF_NONE, NULL, NULL },
2203 { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
2204 };
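
/* A matching configuration document, again hand-written for illustration,
   selects a trace format and, optionally, its buffer size in bytes:

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   or, for Intel Processor Trace:

     <btrace-conf version="1.0">
       <pt size="16384"/>
     </btrace-conf>  */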
2205
2206 #endif /* defined (HAVE_LIBEXPAT) */
2207
2208 /* See btrace.h. */
2209
2210 void
2211 parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2212 {
2213 int errcode;
2214
2215 #if defined (HAVE_LIBEXPAT)
2216
2217 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2218 btrace_conf_elements, xml, conf);
2219 if (errcode != 0)
2220 error (_("Error parsing branch trace configuration."));
2221
2222 #else /* !defined (HAVE_LIBEXPAT) */
2223
2224 error (_("XML parsing is not supported."));
2225
2226 #endif /* !defined (HAVE_LIBEXPAT) */
2227 }
2228
2229 /* See btrace.h. */
2230
2231 const struct btrace_insn *
2232 btrace_insn_get (const struct btrace_insn_iterator *it)
2233 {
2234 const struct btrace_function *bfun;
2235 unsigned int index, end;
2236
2237 index = it->insn_index;
2238 bfun = it->btinfo->functions[it->call_index];
2239
2240 /* Check if the iterator points to a gap in the trace. */
2241 if (bfun->errcode != 0)
2242 return NULL;
2243
2244 /* The index is within the bounds of this function's instruction vector. */
2245 end = VEC_length (btrace_insn_s, bfun->insn);
2246 gdb_assert (0 < end);
2247 gdb_assert (index < end);
2248
2249 return VEC_index (btrace_insn_s, bfun->insn, index);
2250 }
2251
2252 /* See btrace.h. */
2253
2254 int
2255 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2256 {
2257 const struct btrace_function *bfun;
2258
2259 bfun = it->btinfo->functions[it->call_index];
2260 return bfun->errcode;
2261 }
2262
2263 /* See btrace.h. */
2264
2265 unsigned int
2266 btrace_insn_number (const struct btrace_insn_iterator *it)
2267 {
2268 const struct btrace_function *bfun;
2269
2270 bfun = it->btinfo->functions[it->call_index];
2271 return bfun->insn_offset + it->insn_index;
2272 }
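
/* For illustration: instruction numbers are 1-based, so three segments of
   3, 1 and 5 instructions get insn_offset values 1, 4 and 5, respectively;
   an iterator with call_index == 2 and insn_index == 2 then yields
   instruction number 5 + 2 == 7.  */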
2273
2274 /* See btrace.h. */
2275
2276 void
2277 btrace_insn_begin (struct btrace_insn_iterator *it,
2278 const struct btrace_thread_info *btinfo)
2279 {
2280 if (btinfo->functions.empty ())
2281 error (_("No trace."));
2282
2283 it->btinfo = btinfo;
2284 it->call_index = 0;
2285 it->insn_index = 0;
2286 }
2287
2288 /* See btrace.h. */
2289
2290 void
2291 btrace_insn_end (struct btrace_insn_iterator *it,
2292 const struct btrace_thread_info *btinfo)
2293 {
2294 const struct btrace_function *bfun;
2295 unsigned int length;
2296
2297 if (btinfo->functions.empty ())
2298 error (_("No trace."));
2299
2300 bfun = btinfo->functions.back ();
2301 length = VEC_length (btrace_insn_s, bfun->insn);
2302
2303 /* The last function may either be a gap or contain the current
2304 instruction, which is one past the end of the execution trace; ignore
2305 it in both cases. */
2306 if (length > 0)
2307 length -= 1;
2308
2309 it->btinfo = btinfo;
2310 it->call_index = bfun->number - 1;
2311 it->insn_index = length;
2312 }
2313
2314 /* See btrace.h. */
2315
2316 unsigned int
2317 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2318 {
2319 const struct btrace_function *bfun;
2320 unsigned int index, steps;
2321
2322 bfun = it->btinfo->functions[it->call_index];
2323 steps = 0;
2324 index = it->insn_index;
2325
2326 while (stride != 0)
2327 {
2328 unsigned int end, space, adv;
2329
2330 end = VEC_length (btrace_insn_s, bfun->insn);
2331
2332 /* An empty function segment represents a gap in the trace. We count
2333 it as one instruction. */
2334 if (end == 0)
2335 {
2336 const struct btrace_function *next;
2337
2338 next = bfun->flow.next;
2339 if (next == NULL)
2340 break;
2341
2342 stride -= 1;
2343 steps += 1;
2344
2345 bfun = next;
2346 index = 0;
2347
2348 continue;
2349 }
2350
2351 gdb_assert (0 < end);
2352 gdb_assert (index < end);
2353
2354 /* Compute the number of instructions remaining in this segment. */
2355 space = end - index;
2356
2357 /* Advance the iterator as far as possible within this segment. */
2358 adv = std::min (space, stride);
2359 stride -= adv;
2360 index += adv;
2361 steps += adv;
2362
2363 /* Move to the next function if we're at the end of this one. */
2364 if (index == end)
2365 {
2366 const struct btrace_function *next;
2367
2368 next = bfun->flow.next;
2369 if (next == NULL)
2370 {
2371 /* We stepped past the last function.
2372
2373 Let's adjust the index to point to the last instruction in
2374 the previous function. */
2375 index -= 1;
2376 steps -= 1;
2377 break;
2378 }
2379
2380 /* We now point to the first instruction in the new function. */
2381 bfun = next;
2382 index = 0;
2383 }
2384
2385 /* We did make progress. */
2386 gdb_assert (adv > 0);
2387 }
2388
2389 /* Update the iterator. */
2390 it->call_index = bfun->number - 1;
2391 it->insn_index = index;
2392
2393 return steps;
2394 }
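
/* The traversal above can be modeled in a standalone sketch where each
   function segment is reduced to an instruction count and 0 stands for
   a gap that costs a single step.  All names are illustrative.  */
#if 0
#include <stdio.h>

int
main (void)
{
  const unsigned int seg[] = { 3, 0, 2 };  /* 0 models a gap.  */
  const unsigned int nseg = 3;
  unsigned int call = 0, insn = 0, stride = 4, steps = 0;

  while (stride != 0)
    {
      unsigned int space, adv;

      if (seg[call] == 0)
        {
          /* A gap counts as one instruction.  */
          if (call + 1 == nseg)
            break;
          stride -= 1;
          steps += 1;
          call += 1;
          insn = 0;
          continue;
        }

      /* Advance as far as possible within this segment.  */
      space = seg[call] - insn;
      adv = (space < stride) ? space : stride;
      stride -= adv;
      insn += adv;
      steps += adv;

      if (insn == seg[call])
        {
          if (call + 1 == nseg)
            {
              /* Stepped past the end; back up onto the last insn.  */
              insn -= 1;
              steps -= 1;
              break;
            }
          call += 1;
          insn = 0;
        }
    }

  /* Four steps from (0, 0) consume segment 0 and the gap, landing at
     segment 2, instruction 0, with steps == 4.  */
  printf ("call %u, insn %u, steps %u\n", call, insn, steps);
  return 0;
}
#endif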
2395
2396 /* See btrace.h. */
2397
2398 unsigned int
2399 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2400 {
2401 const struct btrace_function *bfun;
2402 unsigned int index, steps;
2403
2404 bfun = it->btinfo->functions[it->call_index];
2405 steps = 0;
2406 index = it->insn_index;
2407
2408 while (stride != 0)
2409 {
2410 unsigned int adv;
2411
2412 /* Move to the previous function if we're at the start of this one. */
2413 if (index == 0)
2414 {
2415 const struct btrace_function *prev;
2416
2417 prev = bfun->flow.prev;
2418 if (prev == NULL)
2419 break;
2420
2421 /* We point to one after the last instruction in the new function. */
2422 bfun = prev;
2423 index = VEC_length (btrace_insn_s, bfun->insn);
2424
2425 /* An empty function segment represents a gap in the trace. We count
2426 it as one instruction. */
2427 if (index == 0)
2428 {
2429 stride -= 1;
2430 steps += 1;
2431
2432 continue;
2433 }
2434 }
2435
2436 /* Advance the iterator as far as possible within this segment. */
2437 adv = std::min (index, stride);
2438
2439 stride -= adv;
2440 index -= adv;
2441 steps += adv;
2442
2443 /* We did make progress. */
2444 gdb_assert (adv > 0);
2445 }
2446
2447 /* Update the iterator. */
2448 it->call_index = bfun->number - 1;
2449 it->insn_index = index;
2450
2451 return steps;
2452 }
2453
2454 /* See btrace.h. */
2455
2456 int
2457 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2458 const struct btrace_insn_iterator *rhs)
2459 {
2460 gdb_assert (lhs->btinfo == rhs->btinfo);
2461
2462 if (lhs->call_index != rhs->call_index)
2463 return lhs->call_index - rhs->call_index;
2464
2465 return lhs->insn_index - rhs->insn_index;
2466 }
2467
2468 /* See btrace.h. */
2469
2470 int
2471 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2472 const struct btrace_thread_info *btinfo,
2473 unsigned int number)
2474 {
2475 const struct btrace_function *bfun;
2476 unsigned int upper, lower;
2477
2478 if (btinfo->functions.empty ())
2479 return 0;
2480
2481 lower = 0;
2482 bfun = btinfo->functions[lower];
2483 if (number < bfun->insn_offset)
2484 return 0;
2485
2486 upper = btinfo->functions.size () - 1;
2487 bfun = btinfo->functions[upper];
2488 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2489 return 0;
2490
2491 /* We assume that there are no holes in the numbering. */
2492 for (;;)
2493 {
2494 const unsigned int average = lower + (upper - lower) / 2;
2495
2496 bfun = btinfo->functions[average];
2497
2498 if (number < bfun->insn_offset)
2499 {
2500 upper = average - 1;
2501 continue;
2502 }
2503
2504 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2505 {
2506 lower = average + 1;
2507 continue;
2508 }
2509
2510 break;
2511 }
2512
2513 it->btinfo = btinfo;
2514 it->call_index = bfun->number - 1;
2515 it->insn_index = number - bfun->insn_offset;
2516 return 1;
2517 }
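
/* The lookup above is a binary search over the half-open ranges
   [insn_offset, insn_offset + length); a gap still occupies one number
   via ftrace_call_num_insn, which keeps every range non-empty.  A
   standalone model with illustrative names:  */
#if 0
#include <stdio.h>

struct seg { unsigned int offset; unsigned int length; };

/* Return the index of the segment whose range contains NUMBER,
   or -1 if NUMBER is out of range.  */
static int
find_seg (const struct seg *segs, unsigned int nsegs, unsigned int number)
{
  unsigned int lower, upper;

  if (nsegs == 0)
    return -1;

  lower = 0;
  upper = nsegs - 1;

  if (number < segs[lower].offset
      || number >= segs[upper].offset + segs[upper].length)
    return -1;

  /* Assumes the ranges are contiguous, as in the function above.  */
  for (;;)
    {
      const unsigned int mid = lower + (upper - lower) / 2;

      if (number < segs[mid].offset)
        upper = mid - 1;
      else if (number >= segs[mid].offset + segs[mid].length)
        lower = mid + 1;
      else
        return (int) mid;
    }
}

int
main (void)
{
  /* 1-based numbering; each segment starts where the last one ended.  */
  const struct seg segs[] = { { 1, 3 }, { 4, 1 }, { 5, 5 } };

  printf ("%d\n", find_seg (segs, 3, 6));   /* Prints 2.  */
  printf ("%d\n", find_seg (segs, 3, 4));   /* Prints 1.  */
  printf ("%d\n", find_seg (segs, 3, 10));  /* Prints -1.  */
  return 0;
}
#endif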
2518
2519 /* Returns true if the recording ends with a function segment that
2520 contains only a single (i.e. the current) instruction. */
2521
2522 static bool
2523 btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2524 {
2525 const btrace_function *bfun;
2526
2527 if (btinfo->functions.empty ())
2528 return false;
2529
2530 bfun = btinfo->functions.back ();
2531 if (bfun->errcode != 0)
2532 return false;
2533
2534 return ftrace_call_num_insn (bfun) == 1;
2535 }
2536
2537 /* See btrace.h. */
2538
2539 const struct btrace_function *
2540 btrace_call_get (const struct btrace_call_iterator *it)
2541 {
2542 if (it->index >= it->btinfo->functions.size ())
2543 return NULL;
2544
2545 return it->btinfo->functions[it->index];
2546 }
2547
2548 /* See btrace.h. */
2549
2550 unsigned int
2551 btrace_call_number (const struct btrace_call_iterator *it)
2552 {
2553 const unsigned int length = it->btinfo->functions.size ();
2554
2555 /* If the last function segment contains only a single instruction (i.e. the
2556 current instruction), skip it. */
2557 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2558 return length;
2559
2560 return it->index + 1;
2561 }
2562
2563 /* See btrace.h. */
2564
2565 void
2566 btrace_call_begin (struct btrace_call_iterator *it,
2567 const struct btrace_thread_info *btinfo)
2568 {
2569 if (btinfo->functions.empty ())
2570 error (_("No trace."));
2571
2572 it->btinfo = btinfo;
2573 it->index = 0;
2574 }
2575
2576 /* See btrace.h. */
2577
2578 void
2579 btrace_call_end (struct btrace_call_iterator *it,
2580 const struct btrace_thread_info *btinfo)
2581 {
2582 if (btinfo->functions.empty ())
2583 error (_("No trace."));
2584
2585 it->btinfo = btinfo;
2586 it->index = btinfo->functions.size ();
2587 }
2588
2589 /* See btrace.h. */
2590
2591 unsigned int
2592 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2593 {
2594 const unsigned int length = it->btinfo->functions.size ();
2595
2596 if (it->index + stride < length - 1)
2597 /* Default case: Simply advance the iterator. */
2598 it->index += stride;
2599 else if (it->index + stride == length - 1)
2600 {
2601 /* We land exactly at the last function segment. If it contains only one
2602 instruction (i.e. the current instruction) it is not actually part of
2603 the trace. */
2604 if (btrace_ends_with_single_insn (it->btinfo))
2605 it->index = length;
2606 else
2607 it->index = length - 1;
2608 }
2609 else
2610 {
2611 /* We land past the last function segment and have to adjust the stride.
2612 If the last function segment contains only one instruction (i.e. the
2613 current instruction) it is not actually part of the trace. */
2614 if (btrace_ends_with_single_insn (it->btinfo))
2615 stride = length - it->index - 1;
2616 else
2617 stride = length - it->index;
2618
2619 it->index = length;
2620 }
2621
2622 return stride;
2623 }
2624
2625 /* See btrace.h. */
2626
2627 unsigned int
2628 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2629 {
2630 const unsigned int length = it->btinfo->functions.size ();
2631 int steps = 0;
2632
2633 gdb_assert (it->index <= length);
2634
2635 if (stride == 0 || it->index == 0)
2636 return 0;
2637
2638 /* If we are at the end, the first step is a special case. If the last
2639 function segment contains only one instruction (i.e. the current
2640 instruction) it is not actually part of the trace. To be able to step
2641 over this instruction, we need at least one more function segment. */
2642 if ((it->index == length) && (length > 1))
2643 {
2644 if (btrace_ends_with_single_insn (it->btinfo))
2645 it->index = length - 2;
2646 else
2647 it->index = length - 1;
2648
2649 steps = 1;
2650 stride -= 1;
2651 }
2652
2653 stride = std::min (stride, it->index);
2654
2655 it->index -= stride;
2656 return steps + stride;
2657 }
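
/* A worked example of the end adjustment above: with five segments where
   the last one holds only the current instruction, stepping back once
   from the end (index == 5) skips that segment and lands on index 3,
   i.e. segment number 4, returning 1.  */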
2658
2659 /* See btrace.h. */
2660
2661 int
2662 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2663 const struct btrace_call_iterator *rhs)
2664 {
2665 gdb_assert (lhs->btinfo == rhs->btinfo);
2666 return (int) (lhs->index - rhs->index);
2667 }
2668
2669 /* See btrace.h. */
2670
2671 int
2672 btrace_find_call_by_number (struct btrace_call_iterator *it,
2673 const struct btrace_thread_info *btinfo,
2674 unsigned int number)
2675 {
2676 const unsigned int length = btinfo->functions.size ();
2677
2678 if ((number == 0) || (number > length))
2679 return 0;
2680
2681 it->btinfo = btinfo;
2682 it->index = number - 1;
2683 return 1;
2684 }
2685
2686 /* See btrace.h. */
2687
2688 void
2689 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2690 const struct btrace_insn_iterator *begin,
2691 const struct btrace_insn_iterator *end)
2692 {
2693 if (btinfo->insn_history == NULL)
2694 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2695
2696 btinfo->insn_history->begin = *begin;
2697 btinfo->insn_history->end = *end;
2698 }
2699
2700 /* See btrace.h. */
2701
2702 void
2703 btrace_set_call_history (struct btrace_thread_info *btinfo,
2704 const struct btrace_call_iterator *begin,
2705 const struct btrace_call_iterator *end)
2706 {
2707 gdb_assert (begin->btinfo == end->btinfo);
2708
2709 if (btinfo->call_history == NULL)
2710 btinfo->call_history = XCNEW (struct btrace_call_history);
2711
2712 btinfo->call_history->begin = *begin;
2713 btinfo->call_history->end = *end;
2714 }
2715
2716 /* See btrace.h. */
2717
2718 int
2719 btrace_is_replaying (struct thread_info *tp)
2720 {
2721 return tp->btrace.replay != NULL;
2722 }
2723
2724 /* See btrace.h. */
2725
2726 int
2727 btrace_is_empty (struct thread_info *tp)
2728 {
2729 struct btrace_insn_iterator begin, end;
2730 struct btrace_thread_info *btinfo;
2731
2732 btinfo = &tp->btrace;
2733
2734 if (btinfo->functions.empty ())
2735 return 1;
2736
2737 btrace_insn_begin (&begin, btinfo);
2738 btrace_insn_end (&end, btinfo);
2739
2740 return btrace_insn_cmp (&begin, &end) == 0;
2741 }
2742
2743 /* Forward the cleanup request. */
2744
2745 static void
2746 do_btrace_data_cleanup (void *arg)
2747 {
2748 btrace_data_fini ((struct btrace_data *) arg);
2749 }
2750
2751 /* See btrace.h. */
2752
2753 struct cleanup *
2754 make_cleanup_btrace_data (struct btrace_data *data)
2755 {
2756 return make_cleanup (do_btrace_data_cleanup, data);
2757 }
2758
2759 #if defined (HAVE_LIBIPT)
2760
2761 /* Print a single packet. */
2762
2763 static void
2764 pt_print_packet (const struct pt_packet *packet)
2765 {
2766 switch (packet->type)
2767 {
2768 default:
2769 printf_unfiltered (("[??: %x]"), packet->type);
2770 break;
2771
2772 case ppt_psb:
2773 printf_unfiltered (("psb"));
2774 break;
2775
2776 case ppt_psbend:
2777 printf_unfiltered (("psbend"));
2778 break;
2779
2780 case ppt_pad:
2781 printf_unfiltered (("pad"));
2782 break;
2783
2784 case ppt_tip:
2785 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2786 packet->payload.ip.ipc,
2787 packet->payload.ip.ip);
2788 break;
2789
2790 case ppt_tip_pge:
2791 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2792 packet->payload.ip.ipc,
2793 packet->payload.ip.ip);
2794 break;
2795
2796 case ppt_tip_pgd:
2797 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2798 packet->payload.ip.ipc,
2799 packet->payload.ip.ip);
2800 break;
2801
2802 case ppt_fup:
2803 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2804 packet->payload.ip.ipc,
2805 packet->payload.ip.ip);
2806 break;
2807
2808 case ppt_tnt_8:
2809 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2810 packet->payload.tnt.bit_size,
2811 packet->payload.tnt.payload);
2812 break;
2813
2814 case ppt_tnt_64:
2815 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2816 packet->payload.tnt.bit_size,
2817 packet->payload.tnt.payload);
2818 break;
2819
2820 case ppt_pip:
2821 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2822 packet->payload.pip.nr ? (" nr") : (""));
2823 break;
2824
2825 case ppt_tsc:
2826 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2827 break;
2828
2829 case ppt_cbr:
2830 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2831 break;
2832
2833 case ppt_mode:
2834 switch (packet->payload.mode.leaf)
2835 {
2836 default:
2837 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2838 break;
2839
2840 case pt_mol_exec:
2841 printf_unfiltered (("mode.exec%s%s"),
2842 packet->payload.mode.bits.exec.csl
2843 ? (" cs.l") : (""),
2844 packet->payload.mode.bits.exec.csd
2845 ? (" cs.d") : (""));
2846 break;
2847
2848 case pt_mol_tsx:
2849 printf_unfiltered (("mode.tsx%s%s"),
2850 packet->payload.mode.bits.tsx.intx
2851 ? (" intx") : (""),
2852 packet->payload.mode.bits.tsx.abrt
2853 ? (" abrt") : (""));
2854 break;
2855 }
2856 break;
2857
2858 case ppt_ovf:
2859 printf_unfiltered (("ovf"));
2860 break;
2861
2862 case ppt_stop:
2863 printf_unfiltered (("stop"));
2864 break;
2865
2866 case ppt_vmcs:
2867 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2868 break;
2869
2870 case ppt_tma:
2871 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2872 packet->payload.tma.fc);
2873 break;
2874
2875 case ppt_mtc:
2876 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2877 break;
2878
2879 case ppt_cyc:
2880 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2881 break;
2882
2883 case ppt_mnt:
2884 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2885 break;
2886 }
2887 }
2888
2889 /* Decode packets into MAINT using DECODER. */
2890
2891 static void
2892 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2893 struct pt_packet_decoder *decoder)
2894 {
2895 int errcode;
2896
2897 for (;;)
2898 {
2899 struct btrace_pt_packet packet;
2900
2901 errcode = pt_pkt_sync_forward (decoder);
2902 if (errcode < 0)
2903 break;
2904
2905 for (;;)
2906 {
2907 pt_pkt_get_offset (decoder, &packet.offset);
2908
2909 errcode = pt_pkt_next (decoder, &packet.packet,
2910 sizeof (packet.packet));
2911 if (errcode < 0)
2912 break;
2913
2914 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2915 {
2916 packet.errcode = pt_errcode (errcode);
2917 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2918 &packet);
2919 }
2920 }
2921
2922 if (errcode == -pte_eos)
2923 break;
2924
2925 packet.errcode = pt_errcode (errcode);
2926 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2927 &packet);
2928
2929 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2930 packet.offset, pt_errstr (packet.errcode));
2931 }
2932
2933 if (errcode != -pte_eos)
2934 warning (_("Failed to synchronize onto the Intel Processor Trace "
2935 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2936 }
2937
2938 /* Update the packet history in BTINFO. */
2939
2940 static void
2941 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2942 {
2944 struct pt_packet_decoder *decoder;
2945 struct btrace_data_pt *pt;
2946 struct pt_config config;
2947 int errcode;
2948
2949 pt = &btinfo->data.variant.pt;
2950
2951 /* Nothing to do if there is no trace. */
2952 if (pt->size == 0)
2953 return;
2954
2955 memset (&config, 0, sizeof (config));
2956
2957 config.size = sizeof (config);
2958 config.begin = pt->data;
2959 config.end = pt->data + pt->size;
2960
2961 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2962 config.cpu.family = pt->config.cpu.family;
2963 config.cpu.model = pt->config.cpu.model;
2964 config.cpu.stepping = pt->config.cpu.stepping;
2965
2966 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2967 if (errcode < 0)
2968 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
2969 pt_errstr (pt_errcode (errcode)));
2970
2971 decoder = pt_pkt_alloc_decoder (&config);
2972 if (decoder == NULL)
2973 error (_("Failed to allocate the Intel Processor Trace decoder."));
2974
2975 TRY
2976 {
2977 btrace_maint_decode_pt (&btinfo->maint, decoder);
2978 }
2979 CATCH (except, RETURN_MASK_ALL)
2980 {
2981 pt_pkt_free_decoder (decoder);
2982
2983 if (except.reason < 0)
2984 throw_exception (except);
2985 }
2986 END_CATCH
2987
2988 pt_pkt_free_decoder (decoder);
2989 }
2990
2991 #endif /* defined (HAVE_LIBIPT) */
2992
2993 /* Update the packet maintenance information for BTINFO and store the
2994 low and high bounds into BEGIN and END, respectively.
2995 Store the current iterator state into FROM and TO. */
2996
2997 static void
2998 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2999 unsigned int *begin, unsigned int *end,
3000 unsigned int *from, unsigned int *to)
3001 {
3002 switch (btinfo->data.format)
3003 {
3004 default:
3005 *begin = 0;
3006 *end = 0;
3007 *from = 0;
3008 *to = 0;
3009 break;
3010
3011 case BTRACE_FORMAT_BTS:
3012 /* Nothing to do - we operate directly on BTINFO->DATA. */
3013 *begin = 0;
3014 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3015 *from = btinfo->maint.variant.bts.packet_history.begin;
3016 *to = btinfo->maint.variant.bts.packet_history.end;
3017 break;
3018
3019 #if defined (HAVE_LIBIPT)
3020 case BTRACE_FORMAT_PT:
3021 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3022 btrace_maint_update_pt_packets (btinfo);
3023
3024 *begin = 0;
3025 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3026 *from = btinfo->maint.variant.pt.packet_history.begin;
3027 *to = btinfo->maint.variant.pt.packet_history.end;
3028 break;
3029 #endif /* defined (HAVE_LIBIPT) */
3030 }
3031 }
3032
3033 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3034 update the current iterator position. */
3035
3036 static void
3037 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3038 unsigned int begin, unsigned int end)
3039 {
3040 switch (btinfo->data.format)
3041 {
3042 default:
3043 break;
3044
3045 case BTRACE_FORMAT_BTS:
3046 {
3047 VEC (btrace_block_s) *blocks;
3048 unsigned int blk;
3049
3050 blocks = btinfo->data.variant.bts.blocks;
3051 for (blk = begin; blk < end; ++blk)
3052 {
3053 const btrace_block_s *block;
3054
3055 block = VEC_index (btrace_block_s, blocks, blk);
3056
3057 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3058 core_addr_to_string_nz (block->begin),
3059 core_addr_to_string_nz (block->end));
3060 }
3061
3062 btinfo->maint.variant.bts.packet_history.begin = begin;
3063 btinfo->maint.variant.bts.packet_history.end = end;
3064 }
3065 break;
3066
3067 #if defined (HAVE_LIBIPT)
3068 case BTRACE_FORMAT_PT:
3069 {
3070 VEC (btrace_pt_packet_s) *packets;
3071 unsigned int pkt;
3072
3073 packets = btinfo->maint.variant.pt.packets;
3074 for (pkt = begin; pkt < end; ++pkt)
3075 {
3076 const struct btrace_pt_packet *packet;
3077
3078 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3079
3080 printf_unfiltered ("%u\t", pkt);
3081 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3082
3083 if (packet->errcode == pte_ok)
3084 pt_print_packet (&packet->packet);
3085 else
3086 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3087
3088 printf_unfiltered ("\n");
3089 }
3090
3091 btinfo->maint.variant.pt.packet_history.begin = begin;
3092 btinfo->maint.variant.pt.packet_history.end = end;
3093 }
3094 break;
3095 #endif /* defined (HAVE_LIBIPT) */
3096 }
3097 }
3098
3099 /* Read a number from an argument string. */
3100
3101 static unsigned int
3102 get_uint (char **arg)
3103 {
3104 char *begin, *end, *pos;
3105 unsigned long number;
3106
3107 begin = *arg;
3108 pos = skip_spaces (begin);
3109
3110 if (!isdigit (*pos))
3111 error (_("Expected positive number, got: %s."), pos);
3112
3113 number = strtoul (pos, &end, 10);
3114 if (number > UINT_MAX)
3115 error (_("Number too big."));
3116
3117 *arg += (end - begin);
3118
3119 return (unsigned int) number;
3120 }
3121
3122 /* Read a context size from an argument string. */
3123
3124 static int
3125 get_context_size (char **arg)
3126 {
3127 char *pos;
3129
3130 pos = skip_spaces (*arg);
3131
3132 if (!isdigit (*pos))
3133 error (_("Expected positive number, got: %s."), pos);
3134
3135 return strtol (pos, arg, 10);
3136 }
3137
3138 /* Complain about junk at the end of an argument string. */
3139
3140 static void
3141 no_chunk (char *arg)
3142 {
3143 if (*arg != 0)
3144 error (_("Junk after argument: %s."), arg);
3145 }
3146
3147 /* The "maintenance btrace packet-history" command. */
3148
3149 static void
3150 maint_btrace_packet_history_cmd (char *arg, int from_tty)
3151 {
3152 struct btrace_thread_info *btinfo;
3153 struct thread_info *tp;
3154 unsigned int size, begin, end, from, to;
3155
3156 tp = find_thread_ptid (inferior_ptid);
3157 if (tp == NULL)
3158 error (_("No thread."));
3159
3160 size = 10;
3161 btinfo = &tp->btrace;
3162
3163 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3164 if (begin == end)
3165 {
3166 printf_unfiltered (_("No trace.\n"));
3167 return;
3168 }
3169
3170 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3171 {
3172 from = to;
3173
3174 if (end - from < size)
3175 size = end - from;
3176 to = from + size;
3177 }
3178 else if (strcmp (arg, "-") == 0)
3179 {
3180 to = from;
3181
3182 if (to - begin < size)
3183 size = to - begin;
3184 from = to - size;
3185 }
3186 else
3187 {
3188 from = get_uint (&arg);
3189 if (end <= from)
3190 error (_("'%u' is out of range."), from);
3191
3192 arg = skip_spaces (arg);
3193 if (*arg == ',')
3194 {
3195 arg = skip_spaces (++arg);
3196
3197 if (*arg == '+')
3198 {
3199 arg += 1;
3200 size = get_context_size (&arg);
3201
3202 no_chunk (arg);
3203
3204 if (end - from < size)
3205 size = end - from;
3206 to = from + size;
3207 }
3208 else if (*arg == '-')
3209 {
3210 arg += 1;
3211 size = get_context_size (&arg);
3212
3213 no_chunk (arg);
3214
3215 /* Include the packet given as first argument. */
3216 from += 1;
3217 to = from;
3218
3219 if (to - begin < size)
3220 size = to - begin;
3221 from = to - size;
3222 }
3223 else
3224 {
3225 to = get_uint (&arg);
3226
3227 /* Include the packet at the second argument and silently
3228 truncate the range. */
3229 if (to < end)
3230 to += 1;
3231 else
3232 to = end;
3233
3234 no_chunk (arg);
3235 }
3236 }
3237 else
3238 {
3239 no_chunk (arg);
3240
3241 if (end - from < size)
3242 size = end - from;
3243 to = from + size;
3244 }
3245
3246 dont_repeat ();
3247 }
3248
3249 btrace_maint_print_packets (btinfo, from, to);
3250 }
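
/* Illustrative invocations, matching the parsing above (the packet
   numbers are made up):

     maint btrace packet-history        prints the next ten packets
     maint btrace packet-history -      prints the previous ten packets
     maint btrace packet-history 42     prints ten packets starting at 42
     maint btrace packet-history 42,50  prints packets 42 through 50
     maint btrace packet-history 42,+8  prints packets 42 through 49
     maint btrace packet-history 42,-8  prints packets 35 through 42  */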
3251
3252 /* The "maintenance btrace clear-packet-history" command. */
3253
3254 static void
3255 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3256 {
3257 struct btrace_thread_info *btinfo;
3258 struct thread_info *tp;
3259
3260 if (args != NULL && *args != 0)
3261 error (_("Invalid argument."));
3262
3263 tp = find_thread_ptid (inferior_ptid);
3264 if (tp == NULL)
3265 error (_("No thread."));
3266
3267 btinfo = &tp->btrace;
3268
3269 /* The maint data must be cleared before BTINFO->DATA - it depends on it. */
3270 btrace_maint_clear (btinfo);
3271 btrace_data_clear (&btinfo->data);
3272 }
3273
3274 /* The "maintenance btrace clear" command. */
3275
3276 static void
3277 maint_btrace_clear_cmd (char *args, int from_tty)
3278 {
3279 struct btrace_thread_info *btinfo;
3280 struct thread_info *tp;
3281
3282 if (args != NULL && *args != 0)
3283 error (_("Invalid argument."));
3284
3285 tp = find_thread_ptid (inferior_ptid);
3286 if (tp == NULL)
3287 error (_("No thread."));
3288
3289 btrace_clear (tp);
3290 }
3291
3292 /* The "maintenance btrace" command. */
3293
3294 static void
3295 maint_btrace_cmd (char *args, int from_tty)
3296 {
3297 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3298 gdb_stdout);
3299 }
3300
3301 /* The "maintenance set btrace" command. */
3302
3303 static void
3304 maint_btrace_set_cmd (char *args, int from_tty)
3305 {
3306 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3307 gdb_stdout);
3308 }
3309
3310 /* The "maintenance show btrace" command. */
3311
3312 static void
3313 maint_btrace_show_cmd (char *args, int from_tty)
3314 {
3315 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3316 all_commands, gdb_stdout);
3317 }
3318
3319 /* The "maintenance set btrace pt" command. */
3320
3321 static void
3322 maint_btrace_pt_set_cmd (char *args, int from_tty)
3323 {
3324 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3325 all_commands, gdb_stdout);
3326 }
3327
3328 /* The "maintenance show btrace pt" command. */
3329
3330 static void
3331 maint_btrace_pt_show_cmd (char *args, int from_tty)
3332 {
3333 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3334 all_commands, gdb_stdout);
3335 }
3336
3337 /* The "maintenance info btrace" command. */
3338
3339 static void
3340 maint_info_btrace_cmd (char *args, int from_tty)
3341 {
3342 struct btrace_thread_info *btinfo;
3343 struct thread_info *tp;
3344 const struct btrace_config *conf;
3345
3346 if (args != NULL && *args != 0)
3347 error (_("Invalid argument."));
3348
3349 tp = find_thread_ptid (inferior_ptid);
3350 if (tp == NULL)
3351 error (_("No thread."));
3352
3353 btinfo = &tp->btrace;
3354
3355 conf = btrace_conf (btinfo);
3356 if (conf == NULL)
3357 error (_("No btrace configuration."));
3358
3359 printf_unfiltered (_("Format: %s.\n"),
3360 btrace_format_string (conf->format));
3361
3362 switch (conf->format)
3363 {
3364 default:
3365 break;
3366
3367 case BTRACE_FORMAT_BTS:
3368 printf_unfiltered (_("Number of packets: %u.\n"),
3369 VEC_length (btrace_block_s,
3370 btinfo->data.variant.bts.blocks));
3371 break;
3372
3373 #if defined (HAVE_LIBIPT)
3374 case BTRACE_FORMAT_PT:
3375 {
3376 struct pt_version version;
3377
3378 version = pt_library_version ();
3379 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3380 version.minor, version.build,
3381 version.ext != NULL ? version.ext : "");
3382
3383 btrace_maint_update_pt_packets (btinfo);
3384 printf_unfiltered (_("Number of packets: %u.\n"),
3385 VEC_length (btrace_pt_packet_s,
3386 btinfo->maint.variant.pt.packets));
3387 }
3388 break;
3389 #endif /* defined (HAVE_LIBIPT) */
3390 }
3391 }
3392
3393 /* The "maint show btrace pt skip-pad" show value function. */
3394
3395 static void
3396 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3397 struct cmd_list_element *c,
3398 const char *value)
3399 {
3400 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
3401 }
3402
3403
3404 /* Initialize btrace maintenance commands. */
3405
3406 void _initialize_btrace (void);
3407 void
3408 _initialize_btrace (void)
3409 {
3410 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3411 _("Info about branch tracing data."), &maintenanceinfolist);
3412
3413 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3414 _("Branch tracing maintenance commands."),
3415 &maint_btrace_cmdlist, "maintenance btrace ",
3416 0, &maintenancelist);
3417
3418 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3419 Set branch tracing specific variables."),
3420 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3421 0, &maintenance_set_cmdlist);
3422
3423 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
3424 Set Intel Processor Trace specific variables."),
3425 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3426 0, &maint_btrace_set_cmdlist);
3427
3428 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3429 Show branch tracing specific variables."),
3430 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3431 0, &maintenance_show_cmdlist);
3432
3433 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
3434 Show Intel Processor Trace specific variables."),
3435 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3436 0, &maint_btrace_show_cmdlist);
3437
3438 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3439 &maint_btrace_pt_skip_pad, _("\
3440 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3441 Show whether PAD packets should be skipped in the btrace packet history."), _("\
3442 When enabled, PAD packets are ignored in the btrace packet history."),
3443 NULL, show_maint_btrace_pt_skip_pad,
3444 &maint_btrace_pt_set_cmdlist,
3445 &maint_btrace_pt_show_cmdlist);
3446
3447 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3448 _("Print the raw branch tracing data.\n\
3449 With no argument, print ten more packets after the previous ten-line print.\n\
3450 With '-' as argument, print ten packets before the previous ten-line print.\n\
3451 One argument specifies the starting packet of a ten-line print.\n\
3452 Two arguments separated by a comma specify the starting and ending packets \
3453 to print.\n\
3454 If the second argument is preceded by '+' or '-', it specifies the distance \
3455 from the first argument.\n"),
3456 &maint_btrace_cmdlist);
3457
3458 add_cmd ("clear-packet-history", class_maintenance,
3459 maint_btrace_clear_packet_history_cmd,
3460 _("Clears the branch tracing packet history.\n\
3461 Discards the raw branch tracing data but not the execution history data.\n\
3462 "),
3463 &maint_btrace_cmdlist);
3464
3465 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3466 _("Clears the branch tracing data.\n\
3467 Discards the raw branch tracing data and the execution history data.\n\
3468 The next 'record' command will fetch the branch tracing data anew.\n\
3469 "),
3470 &maint_btrace_cmdlist);
3471
3472 }