gdb/btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2025 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "btrace.h"
23 #include "gdbthread.h"
24 #include "inferior.h"
25 #include "target.h"
26 #include "record.h"
27 #include "symtab.h"
28 #include "disasm.h"
29 #include "source.h"
30 #include "filenames.h"
31 #include "regcache.h"
32 #include "gdbsupport/rsp-low.h"
33 #include "cli/cli-cmds.h"
34 #include "cli/cli-utils.h"
35 #include "extension.h"
36 #include "gdbarch.h"
37
38 /* For maintenance commands. */
39 #include "record-btrace.h"
40
41 #include <inttypes.h>
42 #include <ctype.h>
43 #include <algorithm>
44 #include <string>
45
46 /* Command lists for btrace maintenance commands. */
47 static struct cmd_list_element *maint_btrace_cmdlist;
48 static struct cmd_list_element *maint_btrace_set_cmdlist;
49 static struct cmd_list_element *maint_btrace_show_cmdlist;
50 static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
51 static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
52
53 /* Control whether to skip PAD packets when computing the packet history. */
54 static bool maint_btrace_pt_skip_pad = true;
55
56 static void btrace_add_pc (struct thread_info *tp);
57
58 /* Print a record debug message. Use do ... while (0) to avoid ambiguities
59 when used in if statements. */
60
61 #define DEBUG(msg, args...) \
62 do \
63 { \
64 if (record_debug != 0) \
65 gdb_printf (gdb_stdlog, \
66 "[btrace] " msg "\n", ##args); \
67 } \
68 while (0)
69
70 #define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
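
/* The do ... while (0) wrapper lets these macros be used like a single
   statement.  For example,

     if (some_condition)
       DEBUG ("tracing");
     else
       DEBUG ("not tracing");

   would not compile with a plain braced block, since the semicolon after
   the first DEBUG would terminate the `if' and leave the `else' dangling.  */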
71
72 /* Return the function name of a recorded function segment for printing.
73 This function never returns NULL. */
74
75 static const char *
76 ftrace_print_function_name (const struct btrace_function *bfun)
77 {
78 struct minimal_symbol *msym;
79 struct symbol *sym;
80
81 msym = bfun->msym;
82 sym = bfun->sym;
83
84 if (sym != NULL)
85 return sym->print_name ();
86
87 if (msym != NULL)
88 return msym->print_name ();
89
90 return "<unknown>";
91 }
92
93 /* Return the file name of a recorded function segment for printing.
94 This function never returns NULL. */
95
96 static const char *
97 ftrace_print_filename (const struct btrace_function *bfun)
98 {
99 struct symbol *sym;
100 const char *filename;
101
102 sym = bfun->sym;
103
104 if (sym != NULL)
105 filename = symtab_to_filename_for_display (sym->symtab ());
106 else
107 filename = "<unknown>";
108
109 return filename;
110 }
111
112 /* Return a string representation of the address of an instruction.
113 This function never returns NULL. */
114
115 static const char *
116 ftrace_print_insn_addr (const struct btrace_insn *insn)
117 {
118 if (insn == NULL)
119 return "<nil>";
120
121 return core_addr_to_string_nz (insn->pc);
122 }
123
124 /* Print an ftrace debug status message. */
125
126 static void
127 ftrace_debug (const struct btrace_function *bfun, const char *prefix)
128 {
129 const char *fun, *file;
130 unsigned int ibegin, iend;
131 int level;
132
133 fun = ftrace_print_function_name (bfun);
134 file = ftrace_print_filename (bfun);
135 level = bfun->level;
136
137 ibegin = bfun->insn_offset;
138 iend = ibegin + bfun->insn.size ();
139
140 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
141 prefix, fun, file, level, ibegin, iend);
142 }
143
144 /* Return the number of instructions in a given function call segment. */
145
146 static unsigned int
147 ftrace_call_num_insn (const struct btrace_function* bfun)
148 {
149 if (bfun == NULL)
150 return 0;
151
152 /* A gap is always counted as one instruction. */
153 if (bfun->errcode != 0)
154 return 1;
155
156 return bfun->insn.size ();
157 }
158
159 /* Return the function segment with the given NUMBER or NULL if no such segment
160 exists. BTINFO is the branch trace information for the current thread. */
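
/* Function segments are numbered starting at one; the number zero is used
   to mean "no segment", e.g. for the UP, PREV and NEXT links.  */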
161
162 static struct btrace_function *
163 ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
164 unsigned int number)
165 {
166 if (number == 0 || number > btinfo->functions.size ())
167 return NULL;
168
169 return &btinfo->functions[number - 1];
170 }
171
172 /* A const version of the function above. */
173
174 static const struct btrace_function *
175 ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
176 unsigned int number)
177 {
178 if (number == 0 || number > btinfo->functions.size ())
179 return NULL;
180
181 return &btinfo->functions[number - 1];
182 }
183
184 /* Return non-zero if BFUN does not match MFUN and FUN;
185 return zero otherwise. */
186
187 static int
188 ftrace_function_switched (const struct btrace_function *bfun,
189 const struct minimal_symbol *mfun,
190 const struct symbol *fun)
191 {
192 struct minimal_symbol *msym;
193 struct symbol *sym;
194
195 msym = bfun->msym;
196 sym = bfun->sym;
197
198 /* If the minimal symbol changed, we certainly switched functions. */
199 if (mfun != NULL && msym != NULL
200 && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
201 return 1;
202
203 /* If the symbol changed, we certainly switched functions. */
204 if (fun != NULL && sym != NULL)
205 {
206 const char *bfname, *fname;
207
208 /* Check the function name. */
209 if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
210 return 1;
211
212 /* Check the location of those functions, as well. */
213 bfname = symtab_to_fullname (sym->symtab ());
214 fname = symtab_to_fullname (fun->symtab ());
215 if (filename_cmp (fname, bfname) != 0)
216 return 1;
217 }
218
219 /* If we lost symbol information, we switched functions. */
220 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
221 return 1;
222
223 /* If we gained symbol information, we switched functions. */
224 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
225 return 1;
226
227 return 0;
228 }
229
230 /* Allocate and initialize a new branch trace function segment at the end of
231 the trace.
232 BTINFO is the branch trace information for the current thread.
233 MFUN and FUN are the symbol information we have for this function.
234 This invalidates all struct btrace_function pointers currently held. */
235
236 static struct btrace_function *
237 ftrace_new_function (struct btrace_thread_info *btinfo,
238 struct minimal_symbol *mfun,
239 struct symbol *fun)
240 {
241 int level;
242 unsigned int number, insn_offset;
243
244 if (btinfo->functions.empty ())
245 {
246 /* Start counting NUMBER and INSN_OFFSET at one. */
247 level = 0;
248 number = 1;
249 insn_offset = 1;
250 }
251 else
252 {
253 const struct btrace_function *prev = &btinfo->functions.back ();
254 level = prev->level;
255 number = prev->number + 1;
256 insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
257 }
258
259 return &btinfo->functions.emplace_back (mfun, fun, number, insn_offset,
260 level);
261 }
262
263 /* Update the UP field of a function segment. */
264
265 static void
266 ftrace_update_caller (struct btrace_function *bfun,
267 struct btrace_function *caller,
268 btrace_function_flags flags)
269 {
270 if (bfun->up != 0)
271 ftrace_debug (bfun, "updating caller");
272
273 bfun->up = caller->number;
274 bfun->flags = flags;
275
276 ftrace_debug (bfun, "set caller");
277 ftrace_debug (caller, "..to");
278 }
279
280 /* Fix up the caller for all segments of a function. */
281
282 static void
283 ftrace_fixup_caller (struct btrace_thread_info *btinfo,
284 struct btrace_function *bfun,
285 struct btrace_function *caller,
286 btrace_function_flags flags)
287 {
288 unsigned int prev, next;
289
290 prev = bfun->prev;
291 next = bfun->next;
292 ftrace_update_caller (bfun, caller, flags);
293
294 /* Update all function segments belonging to the same function. */
295 for (; prev != 0; prev = bfun->prev)
296 {
297 bfun = ftrace_find_call_by_number (btinfo, prev);
298 ftrace_update_caller (bfun, caller, flags);
299 }
300
301 for (; next != 0; next = bfun->next)
302 {
303 bfun = ftrace_find_call_by_number (btinfo, next);
304 ftrace_update_caller (bfun, caller, flags);
305 }
306 }
307
308 /* Add a new function segment for a call at the end of the trace.
309 BTINFO is the branch trace information for the current thread.
310 MFUN and FUN are the symbol information we have for this function. */
311
312 static struct btrace_function *
313 ftrace_new_call (struct btrace_thread_info *btinfo,
314 struct minimal_symbol *mfun,
315 struct symbol *fun)
316 {
317 const unsigned int length = btinfo->functions.size ();
318 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
319
320 bfun->up = length;
321 bfun->level += 1;
322
323 ftrace_debug (bfun, "new call");
324
325 return bfun;
326 }
327
328 /* Add a new function segment for a tail call at the end of the trace.
329 BTINFO is the branch trace information for the current thread.
330 MFUN and FUN are the symbol information we have for this function. */
331
332 static struct btrace_function *
333 ftrace_new_tailcall (struct btrace_thread_info *btinfo,
334 struct minimal_symbol *mfun,
335 struct symbol *fun)
336 {
337 const unsigned int length = btinfo->functions.size ();
338 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
339
340 bfun->up = length;
341 bfun->level += 1;
342 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
343
344 ftrace_debug (bfun, "new tail call");
345
346 return bfun;
347 }
348
349 /* Return the caller of BFUN or NULL if there is none. This function skips
350 tail calls in the call chain. BTINFO is the branch trace information for
351 the current thread. */
352 static struct btrace_function *
353 ftrace_get_caller (struct btrace_thread_info *btinfo,
354 struct btrace_function *bfun)
355 {
356 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
357 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
358 return ftrace_find_call_by_number (btinfo, bfun->up);
359
360 return NULL;
361 }
362
363 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
364 symbol information. BTINFO is the branch trace information for the current
365 thread. */
366
367 static struct btrace_function *
368 ftrace_find_caller (struct btrace_thread_info *btinfo,
369 struct btrace_function *bfun,
370 struct minimal_symbol *mfun,
371 struct symbol *fun)
372 {
373 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
374 {
375 /* Skip functions with incompatible symbol information. */
376 if (ftrace_function_switched (bfun, mfun, fun))
377 continue;
378
379 /* This is the function segment we're looking for. */
380 break;
381 }
382
383 return bfun;
384 }
385
386 /* Find the innermost caller in the back trace of BFUN, skipping all
387 function segments that do not end with a call instruction (e.g.
388 tail calls ending with a jump). BTINFO is the branch trace information for
389 the current thread. */
390
391 static struct btrace_function *
392 ftrace_find_call (struct btrace_thread_info *btinfo,
393 struct btrace_function *bfun)
394 {
395 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
396 {
397 /* Skip gaps. */
398 if (bfun->errcode != 0)
399 continue;
400
401 btrace_insn &last = bfun->insn.back ();
402
403 if (last.iclass == BTRACE_INSN_CALL)
404 break;
405 }
406
407 return bfun;
408 }
409
410 /* Add a continuation segment for a function into which we return at the end of
411 the trace.
412 BTINFO is the branch trace information for the current thread.
413 MFUN and FUN are the symbol information we have for this function. */
414
415 static struct btrace_function *
416 ftrace_new_return (struct btrace_thread_info *btinfo,
417 struct minimal_symbol *mfun,
418 struct symbol *fun)
419 {
420 struct btrace_function *prev, *bfun, *caller;
421
422 bfun = ftrace_new_function (btinfo, mfun, fun);
423 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
424
425 /* It is important to start at PREV's caller. Otherwise, we might find
426 PREV itself, if PREV is a recursive function. */
427 caller = ftrace_find_call_by_number (btinfo, prev->up);
428 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
429 if (caller != NULL)
430 {
431 /* The caller of PREV is the preceding btrace function segment in this
432 function instance. */
433 gdb_assert (caller->next == 0);
434
435 caller->next = bfun->number;
436 bfun->prev = caller->number;
437
438 /* Maintain the function level. */
439 bfun->level = caller->level;
440
441 /* Maintain the call stack. */
442 bfun->up = caller->up;
443 bfun->flags = caller->flags;
444
445 ftrace_debug (bfun, "new return");
446 }
447 else
448 {
449 /* We did not find a caller. This could mean that something went
450 wrong or that the call is simply not included in the trace. */
451
452 /* Let's search for some actual call. */
453 caller = ftrace_find_call_by_number (btinfo, prev->up);
454 caller = ftrace_find_call (btinfo, caller);
455 if (caller == NULL)
456 {
457 /* There is no call in PREV's back trace. We assume that the
458 branch trace did not include it. */
459
460 /* Let's find the topmost function and add a new caller for it.
461 This should handle a series of initial tail calls. */
462 while (prev->up != 0)
463 prev = ftrace_find_call_by_number (btinfo, prev->up);
464
465 bfun->level = prev->level - 1;
466
467 /* Fix up the call stack for PREV. */
468 ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);
469
470 ftrace_debug (bfun, "new return - no caller");
471 }
472 else
473 {
474 /* There is a call in PREV's back trace to which we should have
475 returned but didn't. Let's start a new, separate back trace
476 from PREV's level. */
477 bfun->level = prev->level - 1;
478
479 /* We fix up the back trace for PREV but leave other function segments
480 on the same level as they are.
481 This should handle things like schedule () correctly where we're
482 switching contexts. */
483 prev->up = bfun->number;
484 prev->flags = BFUN_UP_LINKS_TO_RET;
485
486 ftrace_debug (bfun, "new return - unknown caller");
487 }
488 }
489
490 return bfun;
491 }
492
493 /* Add a new function segment for a function switch at the end of the trace.
494 BTINFO is the branch trace information for the current thread.
495 MFUN and FUN are the symbol information we have for this function. */
496
497 static struct btrace_function *
498 ftrace_new_switch (struct btrace_thread_info *btinfo,
499 struct minimal_symbol *mfun,
500 struct symbol *fun)
501 {
502 struct btrace_function *prev, *bfun;
503
504 /* This is an unexplained function switch. We can't really be sure about the
505 call stack, yet the best I can think of right now is to preserve it. */
506 bfun = ftrace_new_function (btinfo, mfun, fun);
507 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
508 bfun->up = prev->up;
509 bfun->flags = prev->flags;
510
511 ftrace_debug (bfun, "new switch");
512
513 return bfun;
514 }
515
516 /* Add a new function segment for a gap in the trace due to a decode error at
517 the end of the trace.
518 BTINFO is the branch trace information for the current thread.
519 ERRCODE is the format-specific error code. */
520
521 static struct btrace_function *
522 ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
523 std::vector<unsigned int> &gaps)
524 {
525 struct btrace_function *bfun;
526
527 if (btinfo->functions.empty ())
528 bfun = ftrace_new_function (btinfo, NULL, NULL);
529 else
530 {
531 /* We hijack the previous function segment if it was empty. */
532 bfun = &btinfo->functions.back ();
533 if (bfun->errcode != 0 || !bfun->insn.empty ())
534 bfun = ftrace_new_function (btinfo, NULL, NULL);
535 }
536
537 bfun->errcode = errcode;
538 gaps.push_back (bfun->number);
539
540 ftrace_debug (bfun, "new gap");
541
542 return bfun;
543 }
544
545 /* Update the current function segment at the end of the trace in BTINFO with
546 respect to the instruction at PC. This may create new function segments.
547 Return the chronologically latest function segment, never NULL. */
548
549 static struct btrace_function *
550 ftrace_update_function (struct btrace_thread_info *btinfo,
551 std::optional<CORE_ADDR> pc)
552 {
553 struct minimal_symbol *mfun = nullptr;
554 struct symbol *fun = nullptr;
555
556 /* Try to determine the function we're in. We use both types of symbols
557 to avoid surprises when we sometimes get a full symbol and sometimes
558 only a minimal symbol. */
559 if (pc.has_value ())
560 {
561 fun = find_pc_function (*pc);
562 bound_minimal_symbol bmfun = lookup_minimal_symbol_by_pc (*pc);
563 mfun = bmfun.minsym;
564
565 if (fun == nullptr && mfun == nullptr)
566 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (*pc));
567 }
568
569 /* If we didn't have a function, we create one. */
570 if (btinfo->functions.empty ())
571 return ftrace_new_function (btinfo, mfun, fun);
572
573 /* If we had a gap before, we create a function. */
574 btrace_function *bfun = &btinfo->functions.back ();
575 if (bfun->errcode != 0)
576 return ftrace_new_function (btinfo, mfun, fun);
577
578 /* If there is no valid PC, which can happen for events with a
579 suppressed IP, we can't do more than return the last bfun. */
580 if (!pc.has_value ())
581 return bfun;
582
583 /* Check the last instruction, if we have one.
584 We do this check first, since it allows us to fill in the call stack
585 links in addition to the normal flow links. */
586 btrace_insn *last = NULL;
587 if (!bfun->insn.empty ())
588 last = &bfun->insn.back ();
589
590 if (last != NULL)
591 {
592 switch (last->iclass)
593 {
594 case BTRACE_INSN_RETURN:
595 {
596 const char *fname;
597
598 /* On some systems, _dl_runtime_resolve returns to the resolved
599 function instead of jumping to it. From our perspective,
600 however, this is a tailcall.
601 If we treated it as return, we wouldn't be able to find the
602 resolved function in our stack back trace. Hence, we would
603 lose the current stack back trace and start anew with an empty
604 back trace. When the resolved function returns, we would then
605 create a stack back trace with the same function names but
606 different frame id's. This will confuse stepping. */
607 fname = ftrace_print_function_name (bfun);
608 if (strcmp (fname, "_dl_runtime_resolve") == 0)
609 return ftrace_new_tailcall (btinfo, mfun, fun);
610
611 return ftrace_new_return (btinfo, mfun, fun);
612 }
613
614 case BTRACE_INSN_CALL:
615 /* Ignore calls to the next instruction. They are used for PIC. */
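/* A typical example is the 32-bit x86 PIC idiom "call 1f; 1: pop %ebx",
   which uses a call to the next instruction to obtain the current PC.  */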
616 if (last->pc + last->size == *pc)
617 break;
618
619 return ftrace_new_call (btinfo, mfun, fun);
620
621 case BTRACE_INSN_JUMP:
622 {
623 CORE_ADDR start;
624
625 start = get_pc_function_start (*pc);
626
627 /* A jump to the start of a function is (typically) a tail call. */
628 if (start == *pc)
629 return ftrace_new_tailcall (btinfo, mfun, fun);
630
631 /* Some versions of _Unwind_RaiseException use an indirect
632 jump to 'return' to the exception handler of the caller
633 handling the exception instead of a return. Let's restrict
634 this heuristic to that and related functions. */
635 const char *fname = ftrace_print_function_name (bfun);
636 if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
637 {
638 struct btrace_function *caller
639 = ftrace_find_call_by_number (btinfo, bfun->up);
640 caller = ftrace_find_caller (btinfo, caller, mfun, fun);
641 if (caller != NULL)
642 return ftrace_new_return (btinfo, mfun, fun);
643 }
644
645 /* If we can't determine the function for PC, we treat a jump at
646 the end of the block as a tail call if we're switching functions
647 and as an intra-function branch if we aren't. */
648 if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
649 return ftrace_new_tailcall (btinfo, mfun, fun);
650
651 break;
652 }
653
654 case BTRACE_INSN_AUX:
655 /* An aux insn couldn't have switched the function. But the
656 segment might not have had a symbol name resolved yet, as events
657 might not have an IP. Use the current IP in that case and update
658 the name. */
659 if (bfun->sym == nullptr && bfun->msym == nullptr)
660 {
661 bfun->sym = fun;
662 bfun->msym = mfun;
663 }
664 break;
665 }
666 }
667
668 /* Check if we're switching functions for some other reason. */
669 if (ftrace_function_switched (bfun, mfun, fun))
670 {
671 DEBUG_FTRACE ("switching from %s in %s at %s",
672 ftrace_print_insn_addr (last),
673 ftrace_print_function_name (bfun),
674 ftrace_print_filename (bfun));
675
676 return ftrace_new_switch (btinfo, mfun, fun);
677 }
678
679 return bfun;
680 }
681
682 /* Add the instruction at PC to BFUN's instructions. */
683
684 static void
685 ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
686 {
687 bfun->insn.push_back (insn);
688
689 if (insn.iclass == BTRACE_INSN_AUX)
690 bfun->flags |= BFUN_CONTAINS_AUX;
691 else
692 bfun->flags |= BFUN_CONTAINS_NON_AUX;
693
694 if (record_debug > 1)
695 ftrace_debug (bfun, "update insn");
696 }
697
698 /* Classify the instruction at PC. */
699
700 static enum btrace_insn_class
701 ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
702 {
703 enum btrace_insn_class iclass;
704
705 iclass = BTRACE_INSN_OTHER;
706 try
707 {
708 if (gdbarch_insn_is_call (gdbarch, pc))
709 iclass = BTRACE_INSN_CALL;
710 else if (gdbarch_insn_is_ret (gdbarch, pc))
711 iclass = BTRACE_INSN_RETURN;
712 else if (gdbarch_insn_is_jump (gdbarch, pc))
713 iclass = BTRACE_INSN_JUMP;
714 }
715 catch (const gdb_exception_error &error)
716 {
717 }
718
719 return iclass;
720 }
721
722 /* Try to match the back trace at LHS to the back trace at RHS. Returns the
723 number of matching function segments or zero if the back traces do not
724 match. BTINFO is the branch trace information for the current thread. */
725
726 static int
727 ftrace_match_backtrace (struct btrace_thread_info *btinfo,
728 struct btrace_function *lhs,
729 struct btrace_function *rhs)
730 {
731 int matches;
732
733 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
734 {
735 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
736 return 0;
737
738 lhs = ftrace_get_caller (btinfo, lhs);
739 rhs = ftrace_get_caller (btinfo, rhs);
740 }
741
742 return matches;
743 }
744
745 /* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
746 BTINFO is the branch trace information for the current thread. */
747
748 static void
749 ftrace_fixup_level (struct btrace_thread_info *btinfo,
750 struct btrace_function *bfun, int adjustment)
751 {
752 if (adjustment == 0)
753 return;
754
755 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
756 ftrace_debug (bfun, "..bfun");
757
758 while (bfun != NULL)
759 {
760 bfun->level += adjustment;
761 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
762 }
763 }
764
765 /* Recompute the global level offset. Traverse the function trace and compute
766 the global level offset as the negative of the minimal function level. */
767
768 static void
769 ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
770 {
771 int level = INT_MAX;
772
773 if (btinfo == NULL)
774 return;
775
776 if (btinfo->functions.empty ())
777 return;
778
779 unsigned int length = btinfo->functions.size() - 1;
780 for (unsigned int i = 0; i < length; ++i)
781 level = std::min (level, btinfo->functions[i].level);
782
783 /* The last function segment contains the current instruction, which is not
784 really part of the trace. If it contains just this one instruction, we
785 ignore the segment. */
786 struct btrace_function *last = &btinfo->functions.back();
787 if (last->insn.size () != 1)
788 level = std::min (level, last->level);
789
790 DEBUG_FTRACE ("setting global level offset: %d", -level);
791 btinfo->level = -level;
792 }
793
794 /* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
795 ftrace_connect_backtrace. BTINFO is the branch trace information for the
796 current thread. */
797
798 static void
799 ftrace_connect_bfun (struct btrace_thread_info *btinfo,
800 struct btrace_function *prev,
801 struct btrace_function *next)
802 {
803 DEBUG_FTRACE ("connecting...");
804 ftrace_debug (prev, "..prev");
805 ftrace_debug (next, "..next");
806
807 /* The function segments are not yet connected. */
808 gdb_assert (prev->next == 0);
809 gdb_assert (next->prev == 0);
810
811 prev->next = next->number;
812 next->prev = prev->number;
813
814 /* We may have moved NEXT to a different function level. */
815 ftrace_fixup_level (btinfo, next, prev->level - next->level);
816
817 /* If we run out of back trace for one, let's use the other's. */
818 if (prev->up == 0)
819 {
820 const btrace_function_flags flags = next->flags;
821
822 next = ftrace_find_call_by_number (btinfo, next->up);
823 if (next != NULL)
824 {
825 DEBUG_FTRACE ("using next's callers");
826 ftrace_fixup_caller (btinfo, prev, next, flags);
827 }
828 }
829 else if (next->up == 0)
830 {
831 const btrace_function_flags flags = prev->flags;
832
833 prev = ftrace_find_call_by_number (btinfo, prev->up);
834 if (prev != NULL)
835 {
836 DEBUG_FTRACE ("using prev's callers");
837 ftrace_fixup_caller (btinfo, next, prev, flags);
838 }
839 }
840 else
841 {
842 /* PREV may have a tailcall caller; NEXT can't. If it does, fix up the up
843 link to add the tail callers to NEXT's back trace.
844
845 This removes NEXT->UP from NEXT's back trace. It will be added back
846 when connecting NEXT and PREV's callers - provided they exist.
847
848 If PREV's back trace consists of a series of tail calls without an
849 actual call, there will be no further connection and NEXT's caller will
850 be removed for good. To catch this case, we handle it here and connect
851 the top of PREV's back trace to NEXT's caller. */
852 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
853 {
854 struct btrace_function *caller;
855 btrace_function_flags next_flags, prev_flags;
856
857 /* We checked NEXT->UP above so CALLER can't be NULL. */
858 caller = ftrace_find_call_by_number (btinfo, next->up);
859 next_flags = next->flags;
860 prev_flags = prev->flags;
861
862 DEBUG_FTRACE ("adding prev's tail calls to next");
863
864 prev = ftrace_find_call_by_number (btinfo, prev->up);
865 ftrace_fixup_caller (btinfo, next, prev, prev_flags);
866
867 for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
868 prev->up))
869 {
870 /* At the end of PREV's back trace, continue with CALLER. */
871 if (prev->up == 0)
872 {
873 DEBUG_FTRACE ("fixing up link for tailcall chain");
874 ftrace_debug (prev, "..top");
875 ftrace_debug (caller, "..up");
876
877 ftrace_fixup_caller (btinfo, prev, caller, next_flags);
878
879 /* If we skipped any tail calls, this may move CALLER to a
880 different function level.
881
882 Note that changing CALLER's level is only OK because we
883 know that this is the last iteration of the bottom-to-top
884 walk in ftrace_connect_backtrace.
885
886 Otherwise we will fix up CALLER's level when we connect it
887 to PREV's caller in the next iteration. */
888 ftrace_fixup_level (btinfo, caller,
889 prev->level - caller->level - 1);
890 break;
891 }
892
893 /* There's nothing to do if we find a real call. */
894 if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
895 {
896 DEBUG_FTRACE ("will fix up link in next iteration");
897 break;
898 }
899 }
900 }
901 }
902 }
903
904 /* Connect function segments on the same level in the back trace at LHS and RHS.
905 The back traces at LHS and RHS are expected to match according to
906 ftrace_match_backtrace. BTINFO is the branch trace information for the
907 current thread. */
908
909 static void
910 ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
911 struct btrace_function *lhs,
912 struct btrace_function *rhs)
913 {
914 while (lhs != NULL && rhs != NULL)
915 {
916 struct btrace_function *prev, *next;
917
918 gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));
919
920 /* Connecting LHS and RHS may change the up link. */
921 prev = lhs;
922 next = rhs;
923
924 lhs = ftrace_get_caller (btinfo, lhs);
925 rhs = ftrace_get_caller (btinfo, rhs);
926
927 ftrace_connect_bfun (btinfo, prev, next);
928 }
929 }
930
931 /* Bridge the gap between two function segments left and right of a gap if their
932 respective back traces match in at least MIN_MATCHES functions. BTINFO is
933 the branch trace information for the current thread.
934
935 Returns non-zero if the gap could be bridged, zero otherwise. */
936
937 static int
938 ftrace_bridge_gap (struct btrace_thread_info *btinfo,
939 struct btrace_function *lhs, struct btrace_function *rhs,
940 int min_matches)
941 {
942 struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
943 int best_matches;
944
945 DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
946 rhs->insn_offset - 1, min_matches);
947
948 best_matches = 0;
949 best_l = NULL;
950 best_r = NULL;
951
952 /* We search the back traces of LHS and RHS for valid connections and connect
953 the two function segments that give the longest combined back trace. */
954
955 for (cand_l = lhs; cand_l != NULL;
956 cand_l = ftrace_get_caller (btinfo, cand_l))
957 for (cand_r = rhs; cand_r != NULL;
958 cand_r = ftrace_get_caller (btinfo, cand_r))
959 {
960 int matches;
961
962 matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
963 if (best_matches < matches)
964 {
965 best_matches = matches;
966 best_l = cand_l;
967 best_r = cand_r;
968 }
969 }
970
971 /* We need at least MIN_MATCHES matches. */
972 gdb_assert (min_matches > 0);
973 if (best_matches < min_matches)
974 return 0;
975
976 DEBUG_FTRACE ("..matches: %d", best_matches);
977
978 /* We will fix up the level of BEST_R and succeeding function segments such
979 that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.
980
981 This will ignore the level of RHS and following if BEST_R != RHS. I.e. if
982 BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).
983
984 To catch this, we already fix up the level here where we can start at RHS
985 instead of at BEST_R. We will ignore the level fixup when connecting
986 BEST_L to BEST_R as they will already be on the same level. */
987 ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);
988
989 ftrace_connect_backtrace (btinfo, best_l, best_r);
990
991 return best_matches;
992 }
993
994 /* Try to bridge gaps due to overflow or decode errors by connecting the
995 function segments that are separated by the gap. */
996
997 static void
998 btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
999 {
1000 struct btrace_thread_info *btinfo = &tp->btrace;
1001 std::vector<unsigned int> remaining;
1002 int min_matches;
1003
1004 DEBUG ("bridge gaps");
1005
1006 /* We require a minimum number of matches for bridging a gap. The number of
1007 required matches will be lowered with each iteration.
1008
1009 The more matches the higher our confidence that the bridging is correct.
1010 For big gaps or small traces, however, it may not be feasible to require a
1011 high number of matches. */
1012 for (min_matches = 5; min_matches > 0; --min_matches)
1013 {
1014 /* Let's try to bridge as many gaps as we can. In some cases, we need to
1015 skip a gap and revisit it after we have closed later gaps.
1016 while (!gaps.empty ())
1017 {
1018 for (const unsigned int number : gaps)
1019 {
1020 struct btrace_function *gap, *lhs, *rhs;
1021 int bridged;
1022
1023 gap = ftrace_find_call_by_number (btinfo, number);
1024
1025 /* We may have a sequence of gaps if we run from one error into
1026 the next as we try to re-sync onto the trace stream. Ignore
1027 all but the leftmost gap in such a sequence.
1028
1029 Also ignore gaps at the beginning of the trace. */
1030 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
1031 if (lhs == NULL || lhs->errcode != 0)
1032 continue;
1033
1034 /* Skip gaps to the right. */
1035 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
1036 while (rhs != NULL && rhs->errcode != 0)
1037 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
1038
1039 /* Ignore gaps at the end of the trace. */
1040 if (rhs == NULL)
1041 continue;
1042
1043 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
1044
1045 /* Keep track of gaps we were not able to bridge and try again.
1046 If we just pushed them to the end of GAPS we would risk an
1047 infinite loop in case we simply cannot bridge a gap. */
1048 if (bridged == 0)
1049 remaining.push_back (number);
1050 }
1051
1052 /* Let's see if we made any progress. */
1053 if (remaining.size () == gaps.size ())
1054 break;
1055
1056 gaps.clear ();
1057 gaps.swap (remaining);
1058 }
1059
1060 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
1061 if (gaps.empty ())
1062 break;
1063
1064 remaining.clear ();
1065 }
1066
1067 /* We may omit this in some cases. Not sure it is worth the extra
1068 complication, though. */
1069 ftrace_compute_global_level_offset (btinfo);
1070 }
1071
1072 /* Compute the function branch trace from BTS trace. */
1073
1074 static void
1075 btrace_compute_ftrace_bts (struct thread_info *tp,
1076 const struct btrace_data_bts *btrace,
1077 std::vector<unsigned int> &gaps)
1078 {
1079 /* We may end up doing target calls that require the current thread to be TP,
1080 for example reading memory through gdb_insn_length. Make sure TP is the
1081 current thread. */
1082 scoped_restore_current_thread restore_thread;
1083 switch_to_thread (tp);
1084
1085 struct btrace_thread_info *btinfo;
1086 unsigned int blk;
1087 int level;
1088
1089 gdbarch *gdbarch = current_inferior ()->arch ();
1090 btinfo = &tp->btrace;
1091 blk = btrace->blocks->size ();
1092
1093 if (btinfo->functions.empty ())
1094 level = INT_MAX;
1095 else
1096 level = -btinfo->level;
1097
1098 while (blk != 0)
1099 {
1100 CORE_ADDR pc;
1101
1102 blk -= 1;
1103
1104 const btrace_block &block = btrace->blocks->at (blk);
1105 pc = block.begin;
1106
1107 for (;;)
1108 {
1109 struct btrace_function *bfun;
1110 struct btrace_insn insn;
1111 int size;
1112
1113 /* We should hit the end of the block. Warn if we went too far. */
1114 if (block.end < pc)
1115 {
1116 /* Indicate the gap in the trace. */
1117 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
1118
1119 warning (_("Recorded trace may be corrupted at instruction "
1120 "%u (pc = %s)."), bfun->insn_offset - 1,
1121 core_addr_to_string_nz (pc));
1122
1123 break;
1124 }
1125
1126 bfun = ftrace_update_function (btinfo,
1127 std::make_optional<CORE_ADDR> (pc));
1128
1129 /* Maintain the function level offset.
1130 For all but the last block, we do it here. */
1131 if (blk != 0)
1132 level = std::min (level, bfun->level);
1133
1134 size = 0;
1135 try
1136 {
1137 size = gdb_insn_length (gdbarch, pc);
1138 }
1139 catch (const gdb_exception_error &error)
1140 {
1141 }
1142
1143 insn.pc = pc;
1144 insn.size = size;
1145 insn.iclass = ftrace_classify_insn (gdbarch, pc);
1146 insn.flags = 0;
1147
1148 ftrace_update_insns (bfun, insn);
1149
1150 /* We're done once we pushed the instruction at the end. */
1151 if (block.end == pc)
1152 break;
1153
1154 /* We can't continue if we fail to compute the size. */
1155 if (size <= 0)
1156 {
1157 /* Indicate the gap in the trace. We just added INSN so we're
1158 not at the beginning. */
1159 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
1160
1161 warning (_("Recorded trace may be incomplete at instruction %u "
1162 "(pc = %s)."), bfun->insn_offset - 1,
1163 core_addr_to_string_nz (pc));
1164
1165 break;
1166 }
1167
1168 pc += size;
1169
1170 /* Maintain the function level offset.
1171 For the last block, we do it here to not consider the last
1172 instruction.
1173 Since the last instruction corresponds to the current instruction
1174 and is not really part of the execution history, it shouldn't
1175 affect the level. */
1176 if (blk == 0)
1177 level = std::min (level, bfun->level);
1178 }
1179 }
1180
1181 /* LEVEL is the minimal function level of all btrace function segments.
1182 Define the global level offset to -LEVEL so all function levels are
1183 normalized to start at zero. */
1184 btinfo->level = -level;
1185 }
1186
1187 #if defined (HAVE_LIBIPT)
1188
1189 static enum btrace_insn_class
1190 pt_reclassify_insn (enum pt_insn_class iclass)
1191 {
1192 switch (iclass)
1193 {
1194 case ptic_call:
1195 return BTRACE_INSN_CALL;
1196
1197 case ptic_return:
1198 return BTRACE_INSN_RETURN;
1199
1200 case ptic_jump:
1201 return BTRACE_INSN_JUMP;
1202
1203 default:
1204 return BTRACE_INSN_OTHER;
1205 }
1206 }
1207
1208 /* Return the btrace instruction flags for INSN. */
1209
1210 static btrace_insn_flags
1211 pt_btrace_insn_flags (const struct pt_insn &insn)
1212 {
1213 btrace_insn_flags flags = 0;
1214
1215 if (insn.speculative)
1216 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1217
1218 return flags;
1219 }
1220
1221 /* Return the btrace instruction for INSN. */
1222
1223 static btrace_insn
1224 pt_btrace_insn (const struct pt_insn &insn)
1225 {
1226 return {{static_cast<CORE_ADDR> (insn.ip)},
1227 static_cast<gdb_byte> (insn.size),
1228 pt_reclassify_insn (insn.iclass),
1229 pt_btrace_insn_flags (insn)};
1230 }
1231
1232 #if defined (HAVE_PT_INSN_EVENT)
1233 /* Helper for events that will result in an aux_insn. */
1234
1235 static void
1236 handle_pt_aux_insn (btrace_thread_info *btinfo, std::string &aux_str,
1237 std::optional<CORE_ADDR> pc)
1238 {
1239 btinfo->aux_data.emplace_back (std::move (aux_str));
1240 struct btrace_function *bfun = ftrace_update_function (btinfo, pc);
1241
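/* For an aux instruction, the PC field holds the index of the string just
   added to BTINFO's aux_data vector; the size is zero since no actual
   instruction is associated with it.  */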
1242 btrace_insn insn {{btinfo->aux_data.size () - 1}, 0,
1243 BTRACE_INSN_AUX, 0};
1244
1245 ftrace_update_insns (bfun, insn);
1246 }
1247
1248 /* Check if the recording contains real instructions and not only auxiliary
1249 instructions since the last gap (or since the beginning). */
1250
1251 static bool
1252 ftrace_contains_non_aux_since_last_gap (const btrace_thread_info *btinfo)
1253 {
1254 const std::vector<btrace_function> &functions = btinfo->functions;
1255
1256 std::vector<btrace_function>::const_reverse_iterator rit;
1257 for (rit = functions.crbegin (); rit != functions.crend (); ++rit)
1258 {
1259 if (rit->errcode != 0)
1260 return false;
1261
1262 if ((rit->flags & BFUN_CONTAINS_NON_AUX) != 0)
1263 return true;
1264 }
1265
1266 return false;
1267 }
1268 #endif /* defined (HAVE_PT_INSN_EVENT) */
1269
1270 #if (LIBIPT_VERSION >= 0x201)
1271 /* Translate an interrupt vector to a mnemonic string as defined for x86.
1272 Returns nullptr if there is none. */
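/* For example, vector 14 translates to "#pf"; vectors without a defined
   mnemonic, such as 2 (NMI), yield nullptr.  */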
1273
1274 static const char *
1275 decode_interrupt_vector (const uint8_t vector)
1276 {
1277 static const char *mnemonic[]
1278 = { "#de", "#db", nullptr, "#bp", "#of", "#br", "#ud", "#nm",
1279 "#df", nullptr, "#ts", "#np", "#ss", "#gp", "#pf", nullptr,
1280 "#mf", "#ac", "#mc", "#xm", "#ve", "#cp" };
1281
1282 if (vector < (sizeof (mnemonic) / sizeof (mnemonic[0])))
1283 return mnemonic[vector];
1284
1285 return nullptr;
1286 }
1287 #endif /* defined (LIBIPT_VERSION >= 0x201) */
1288
1289 /* Handle instruction decode events (libipt-v2). */
1290
1291 static int
1292 handle_pt_insn_events (struct btrace_thread_info *btinfo,
1293 struct pt_insn_decoder *decoder,
1294 std::vector<unsigned int> &gaps, int status)
1295 {
1296 #if defined (HAVE_PT_INSN_EVENT)
1297 while (status & pts_event_pending)
1298 {
1299 struct pt_event event;
1300 uint64_t offset;
1301 std::optional<CORE_ADDR> pc;
1302
1303 status = pt_insn_event (decoder, &event, sizeof (event));
1304 if (status < 0)
1305 break;
1306
1307 switch (event.type)
1308 {
1309 default:
1310 break;
1311
1312 case ptev_enabled:
1313 {
1314 if (event.status_update != 0)
1315 break;
1316
1317 /* Only create a new gap if there are non-aux instructions in
1318 the trace since the last gap. We could be at the beginning
1319 of the recording and could already have handled one or more
1320 events, like ptev_iret, that created aux insns. In that
1321 case we don't want to create a gap or print a warning. */
1322 if (event.variant.enabled.resumed == 0
1323 && ftrace_contains_non_aux_since_last_gap (btinfo))
1324 {
1325 struct btrace_function *bfun
1326 = ftrace_new_gap (btinfo, BDE_PT_NON_CONTIGUOUS, gaps);
1327
1328 pt_insn_get_offset (decoder, &offset);
1329
1330 warning
1331 (_("Non-contiguous trace at instruction %u (offset = 0x%"
1332 PRIx64 ")."), bfun->insn_offset - 1, offset);
1333 }
1334
1335 break;
1336 }
1337
1338 case ptev_overflow:
1339 {
1340 struct btrace_function *bfun
1341 = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1342
1343 pt_insn_get_offset (decoder, &offset);
1344
1345 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
1346 bfun->insn_offset - 1, offset);
1347
1348 break;
1349 }
1350 #if defined (HAVE_STRUCT_PT_EVENT_VARIANT_PTWRITE)
1351 case ptev_ptwrite:
1352 {
1353 std::optional<std::string> ptw_string;
1354
1355 /* Look up the PC if available. The event often doesn't provide
1356 one, so we look into the last function segment as well.
1357 Looking further back makes limited sense for ptwrite. */
1358 if (event.ip_suppressed == 0)
1359 pc = event.variant.ptwrite.ip;
1360 else if (!btinfo->functions.empty ())
1361 {
1362 std::vector<btrace_insn> &insns
1363 = btinfo->functions.back ().insn;
1364 for (auto insn = insns.rbegin (); insn != insns.rend ();
1365 ++insn)
1366 {
1367 switch (insn->iclass)
1368 {
1369 case BTRACE_INSN_AUX:
1370 continue;
1371
1372 case BTRACE_INSN_OTHER:
1373 case BTRACE_INSN_CALL:
1374 case BTRACE_INSN_RETURN:
1375 case BTRACE_INSN_JUMP:
1376 pc = insn->pc;
1377 break;
1378 /* No default to rely on compiler warnings. */
1379 }
1380 break;
1381 }
1382 }
1383
1384 if (!pc.has_value ())
1385 warning (_("Failed to determine the PC for ptwrite."));
1386
1387
1388 if (btinfo->ptw_callback_fun != nullptr)
1389 ptw_string
1390 = btinfo->ptw_callback_fun (event.variant.ptwrite.payload,
1391 pc, btinfo->ptw_context);
1392
1393 if (ptw_string.has_value () && (*ptw_string).empty ())
1394 continue;
1395
1396 if (!ptw_string.has_value ())
1397 ptw_string = hex_string (event.variant.ptwrite.payload);
1398
1399 handle_pt_aux_insn (btinfo, *ptw_string, pc);
1400
1401 break;
1402 }
1403 #endif /* defined (HAVE_STRUCT_PT_EVENT_VARIANT_PTWRITE) */
1404
1405 #if (LIBIPT_VERSION >= 0x201)
1406 case ptev_interrupt:
1407 {
1408 std::string aux_string = std::string (_("interrupt: vector = "))
1409 + hex_string (event.variant.interrupt.vector);
1410
1411 const char *decoded
1412 = decode_interrupt_vector (event.variant.interrupt.vector);
1413 if (decoded != nullptr)
1414 aux_string += std::string (" (") + decoded + ")";
1415
1416 if (event.variant.interrupt.has_cr2 != 0)
1417 {
1418 aux_string += std::string (", cr2 = ")
1419 + hex_string (event.variant.interrupt.cr2);
1420 }
1421
1422 if (event.ip_suppressed == 0)
1423 {
1424 pc = event.variant.interrupt.ip;
1425 aux_string += std::string (", ip = ") + hex_string (*pc);
1426 }
1427
1428 handle_pt_aux_insn (btinfo, aux_string, pc);
1429 break;
1430 }
1431
1432 case ptev_iret:
1433 {
1434 std::string aux_string = std::string (_("iret"));
1435
1436 if (event.ip_suppressed == 0)
1437 {
1438 pc = event.variant.iret.ip;
1439 aux_string += std::string (": ip = ") + hex_string (*pc);
1440 }
1441
1442 handle_pt_aux_insn (btinfo, aux_string, pc);
1443 break;
1444 }
1445
1446 case ptev_smi:
1447 {
1448 std::string aux_string = std::string (_("smi"));
1449
1450 if (event.ip_suppressed == 0)
1451 {
1452 pc = event.variant.smi.ip;
1453 aux_string += std::string (": ip = ") + hex_string (*pc);
1454 }
1455
1456 handle_pt_aux_insn (btinfo, aux_string, pc);
1457 break;
1458 }
1459
1460 case ptev_rsm:
1461 {
1462 std::string aux_string = std::string (_("rsm"));
1463
1464 if (event.ip_suppressed == 0)
1465 {
1466 pc = event.variant.rsm.ip;
1467 aux_string += std::string (": ip = ") + hex_string (*pc);
1468 }
1469
1470 handle_pt_aux_insn (btinfo, aux_string, pc);
1471 break;
1472 }
1473
1474 case ptev_sipi:
1475 {
1476 std::string aux_string = std::string (_("sipi: vector = "))
1477 + hex_string (event.variant.sipi.vector);
1478
1479 handle_pt_aux_insn (btinfo, aux_string, pc);
1480 break;
1481 }
1482
1483 case ptev_init:
1484 {
1485 std::string aux_string = std::string (_("init"));
1486
1487 if (event.ip_suppressed == 0)
1488 {
1489 pc = event.variant.init.ip;
1490 aux_string += std::string (": ip = ") + hex_string (*pc);
1491 }
1492
1493 handle_pt_aux_insn (btinfo, aux_string, pc);
1494 break;
1495 }
1496
1497 case ptev_vmentry:
1498 {
1499 std::string aux_string = std::string (_("vmentry"));
1500
1501 if (event.ip_suppressed == 0)
1502 {
1503 pc = event.variant.vmentry.ip;
1504 aux_string += std::string (": ip = ") + hex_string (*pc);
1505 }
1506
1507 handle_pt_aux_insn (btinfo, aux_string, pc);
1508 break;
1509 }
1510
1511 case ptev_vmexit:
1512 {
1513 std::string aux_string = std::string (_("vmexit"));
1514
1515 if (event.variant.vmexit.has_vector != 0
1516 || event.variant.vmexit.has_vmxr != 0
1517 || event.variant.vmexit.has_vmxq != 0
1518 || event.ip_suppressed != 0)
1519 aux_string += std::string (":");
1520
1521 if (event.variant.vmexit.has_vector != 0)
1522 {
1523 aux_string += std::string (_(" vector = "))
1524 + hex_string (event.variant.vmexit.vector);
1525
1526 const char* decoded = decode_interrupt_vector
1527 (event.variant.vmexit.vector);
1528 if (decoded != nullptr)
1529 aux_string += std::string (" (") + decoded + ")";
1530 }
1531
1532 if (event.variant.vmexit.has_vmxr != 0)
1533 {
1534 std::string separator = aux_string.back () == ':' ? "" : ",";
1535 aux_string += separator + std::string (" vmxr = ")
1536 + hex_string (event.variant.vmexit.vmxr);
1537 }
1538
1539 if (event.variant.vmexit.has_vmxq != 0)
1540 {
1541 std::string separator = aux_string.back () == ':' ? "" : ",";
1542 aux_string += separator + std::string (" vmxq = ")
1543 + hex_string (event.variant.vmexit.vmxq);
1544 }
1545
1546 if (event.ip_suppressed == 0)
1547 {
1548 pc = event.variant.vmexit.ip;
1549 std::string separator = aux_string.back () == ':' ? "" : ",";
1550 aux_string += separator + std::string (" ip = ")
1551 + hex_string (*pc);
1552 }
1553
1554 handle_pt_aux_insn (btinfo, aux_string, pc);
1555 break;
1556 }
1557
1558 case ptev_shutdown:
1559 {
1560 std::string aux_string = std::string (_("shutdown"));
1561
1562 if (event.ip_suppressed == 0)
1563 {
1564 pc = event.variant.shutdown.ip;
1565 aux_string += std::string (": ip = ") + hex_string (*pc);
1566 }
1567
1568 handle_pt_aux_insn (btinfo, aux_string, pc);
1569 break;
1570 }
1571
1572 case ptev_uintr:
1573 {
1574 std::string aux_string = std::string (_("uintr: vector = "))
1575 + hex_string (event.variant.uintr.vector);
1576
1577 if (event.ip_suppressed == 0)
1578 {
1579 pc = event.variant.uintr.ip;
1580 aux_string += std::string (", ip = ") + hex_string (*pc);
1581 }
1582
1583 handle_pt_aux_insn (btinfo, aux_string, pc);
1584 break;
1585 }
1586
1587 case ptev_uiret:
1588 {
1589 std::string aux_string = std::string (_("uiret"));
1590
1591 if (event.ip_suppressed == 0)
1592 {
1593 pc = event.variant.uiret.ip;
1594 aux_string += std::string (": ip = ") + hex_string (*pc);
1595 }
1596
1597 handle_pt_aux_insn (btinfo, aux_string, pc);
1598 break;
1599 }
1600 #endif /* defined (LIBIPT_VERSION >= 0x201) */
1601 }
1602 }
1603 #endif /* defined (HAVE_PT_INSN_EVENT) */
1604
1605 return status;
1606 }
1607
1608 /* Handle events indicated by flags in INSN (libipt-v1). */
1609
1610 static void
1611 handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
1612 struct pt_insn_decoder *decoder,
1613 const struct pt_insn &insn,
1614 std::vector<unsigned int> &gaps)
1615 {
1616 #if defined (HAVE_STRUCT_PT_INSN_ENABLED)
1617 /* Tracing is disabled and re-enabled each time we enter the kernel. Most
1618 times, we continue from the same instruction we stopped before. This is
1619 indicated via the RESUMED instruction flag. The ENABLED instruction flag
1620 means that we continued from some other instruction. Indicate this as a
1621 trace gap except when tracing just started. */
1622 if (insn.enabled && !btinfo->functions.empty ())
1623 {
1624 struct btrace_function *bfun;
1625 uint64_t offset;
1626
1627 bfun = ftrace_new_gap (btinfo, BDE_PT_NON_CONTIGUOUS, gaps);
1628
1629 pt_insn_get_offset (decoder, &offset);
1630
1631 warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
1632 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
1633 insn.ip);
1634 }
1635 #endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */
1636
1637 #if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
1638 /* Indicate trace overflows. */
1639 if (insn.resynced)
1640 {
1641 struct btrace_function *bfun;
1642 uint64_t offset;
1643
1644 bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);
1645
1646 pt_insn_get_offset (decoder, &offset);
1647
1648 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
1649 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
1650 }
1651 #endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
1652 }
1653
1654 /* Add function branch trace to BTINFO using DECODER. */
1655
1656 static void
1657 ftrace_add_pt (struct btrace_thread_info *btinfo,
1658 struct pt_insn_decoder *decoder,
1659 int *plevel,
1660 std::vector<unsigned int> &gaps)
1661 {
1662 struct btrace_function *bfun;
1663 uint64_t offset;
1664 int status;
1665
1666 /* Register the ptwrite filter. */
1667 apply_ext_lang_ptwrite_filter (btinfo);
1668
1669 for (;;)
1670 {
1671 struct pt_insn insn;
1672
1673 status = pt_insn_sync_forward (decoder);
1674 if (status < 0)
1675 {
1676 if (status != -pte_eos)
1677 warning (_("Failed to synchronize onto the Intel Processor "
1678 "Trace stream: %s."), pt_errstr (pt_errcode (status)));
1679 break;
1680 }
1681
1682 for (;;)
1683 {
1684 /* Handle events from the previous iteration or synchronization. */
1685 status = handle_pt_insn_events (btinfo, decoder, gaps, status);
1686 if (status < 0)
1687 break;
1688
1689 status = pt_insn_next (decoder, &insn, sizeof(insn));
1690 if (status < 0)
1691 break;
1692
1693 /* Handle events indicated by flags in INSN. */
1694 handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
1695
1696 bfun
1697 = ftrace_update_function (btinfo,
1698 std::make_optional<CORE_ADDR> (insn.ip));
1699
1700 /* Maintain the function level offset. */
1701 *plevel = std::min (*plevel, bfun->level);
1702
1703 ftrace_update_insns (bfun, pt_btrace_insn (insn));
1704 }
1705
1706 if (status == -pte_eos)
1707 break;
1708
1709 /* Indicate the gap in the trace. */
1710 bfun = ftrace_new_gap (btinfo, status, gaps);
1711
1712 pt_insn_get_offset (decoder, &offset);
1713
1714 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
1715 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
1716 offset, insn.ip, pt_errstr (pt_errcode (status)));
1717 }
1718 }
1719
1720 /* A callback function to allow the trace decoder to read the inferior's
1721 memory. */
1722
1723 static int
1724 btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
1725 const struct pt_asid *asid, uint64_t pc,
1726 void *context)
1727 {
1728 int result, errcode;
1729
1730 result = (int) size;
1731 try
1732 {
1733 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
1734 if (errcode != 0)
1735 result = -pte_nomap;
1736 }
1737 catch (const gdb_exception_error &error)
1738 {
1739 result = -pte_nomap;
1740 }
1741
1742 return result;
1743 }
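
/* This callback is registered with the decoder's image via
   pt_image_set_callback in btrace_compute_ftrace_pt below.  */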
1744
1745 /* Translate the vendor from one enum to another. */
1746
1747 static enum pt_cpu_vendor
1748 pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1749 {
1750 switch (vendor)
1751 {
1752 default:
1753 return pcv_unknown;
1754
1755 case CV_INTEL:
1756 return pcv_intel;
1757 }
1758 }
1759
1760 /* Finalize the function branch trace after decode. */
1761
1762 static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1763 struct thread_info *tp, int level)
1764 {
1765 pt_insn_free_decoder (decoder);
1766
1767 /* LEVEL is the minimal function level of all btrace function segments.
1768 Define the global level offset to -LEVEL so all function levels are
1769 normalized to start at zero. */
1770 tp->btrace.level = -level;
1771
1772 /* Add a single last instruction entry for the current PC.
1773 This allows us to compute the backtrace at the current PC using both
1774 standard unwind and btrace unwind.
1775 This extra entry is ignored by all record commands. */
1776 btrace_add_pc (tp);
1777 }
1778
1779 /* Compute the function branch trace from Intel Processor Trace
1780 format. */
1781
1782 static void
1783 btrace_compute_ftrace_pt (struct thread_info *tp,
1784 const struct btrace_data_pt *btrace,
1785 std::vector<unsigned int> &gaps)
1786 {
1787 /* We may end up doing target calls that require the current thread to be TP,
1788 for example reading memory through btrace_pt_readmem_callback. Make sure
1789 TP is the current thread. */
1790 scoped_restore_current_thread restore_thread;
1791 switch_to_thread (tp);
1792
1793 struct btrace_thread_info *btinfo;
1794 struct pt_insn_decoder *decoder;
1795 struct pt_config config;
1796 int level, errcode;
1797
1798 if (btrace->size == 0)
1799 return;
1800
1801 btinfo = &tp->btrace;
1802 if (btinfo->functions.empty ())
1803 level = INT_MAX;
1804 else
1805 level = -btinfo->level;
1806
1807 pt_config_init(&config);
1808 config.begin = btrace->data;
1809 config.end = btrace->data + btrace->size;
1810
1811 /* We treat an unknown vendor as 'no errata'. */
1812 if (btrace->config.cpu.vendor != CV_UNKNOWN)
1813 {
1814 config.cpu.vendor
1815 = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1816 config.cpu.family = btrace->config.cpu.family;
1817 config.cpu.model = btrace->config.cpu.model;
1818 config.cpu.stepping = btrace->config.cpu.stepping;
1819
1820 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1821 if (errcode < 0)
1822 error (_("Failed to configure the Intel Processor Trace "
1823 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
1824 }
1825
1826 decoder = pt_insn_alloc_decoder (&config);
1827 if (decoder == NULL)
1828 error (_("Failed to allocate the Intel Processor Trace decoder."));
1829
1830 try
1831 {
1832 struct pt_image *image;
1833
1834 image = pt_insn_get_image(decoder);
1835 if (image == NULL)
1836 error (_("Failed to configure the Intel Processor Trace decoder."));
1837
1838 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
1839 if (errcode < 0)
1840 error (_("Failed to configure the Intel Processor Trace decoder: "
1841 "%s."), pt_errstr (pt_errcode (errcode)));
1842
1843 ftrace_add_pt (btinfo, decoder, &level, gaps);
1844 }
1845 catch (const gdb_exception &error)
1846 {
1847 /* Indicate a gap in the trace if we quit trace processing. */
1848 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
1849 ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
1850
1851 btrace_finalize_ftrace_pt (decoder, tp, level);
1852
1853 throw;
1854 }
1855
1856 btrace_finalize_ftrace_pt (decoder, tp, level);
1857 }
1858
1859 #else /* defined (HAVE_LIBIPT) */
1860
1861 static void
1862 btrace_compute_ftrace_pt (struct thread_info *tp,
1863 const struct btrace_data_pt *btrace,
1864 std::vector<unsigned int> &gaps)
1865 {
1866 internal_error (_("Unexpected branch trace format."));
1867 }
1868
1869 #endif /* defined (HAVE_LIBIPT) */
1870
1871 /* Compute the function branch trace from a block branch trace BTRACE for
1872 a thread given by BTINFO. If CPU is not NULL, overwrite the cpu in the
1873 branch trace configuration. This is currently only used for the PT
1874 format. */
1875
1876 static void
1877 btrace_compute_ftrace_1 (struct thread_info *tp,
1878 struct btrace_data *btrace,
1879 const struct btrace_cpu *cpu,
1880 std::vector<unsigned int> &gaps)
1881 {
1882 DEBUG ("compute ftrace");
1883
1884 switch (btrace->format)
1885 {
1886 case BTRACE_FORMAT_NONE:
1887 return;
1888
1889 case BTRACE_FORMAT_BTS:
1890 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
1891 return;
1892
1893 case BTRACE_FORMAT_PT:
1894 /* Overwrite the cpu we use for enabling errata workarounds. */
1895 if (cpu != nullptr)
1896 btrace->variant.pt.config.cpu = *cpu;
1897
1898 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
1899 return;
1900 }
1901
1902 internal_error (_("Unknown branch trace format."));
1903 }
1904
1905 static void
1906 btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
1907 {
1908 if (!gaps.empty ())
1909 {
1910 tp->btrace.ngaps += gaps.size ();
1911 btrace_bridge_gaps (tp, gaps);
1912 }
1913 }
1914
1915 static void
1916 btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
1917 const struct btrace_cpu *cpu)
1918 {
1919 std::vector<unsigned int> gaps;
1920
1921 try
1922 {
1923 btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
1924 }
1925 catch (const gdb_exception &error)
1926 {
1927 btrace_finalize_ftrace (tp, gaps);
1928
1929 throw;
1930 }
1931
1932 btrace_finalize_ftrace (tp, gaps);
1933 }
1934
1935 /* Add an entry for the current PC. */
1936
1937 static void
1938 btrace_add_pc (struct thread_info *tp)
1939 {
1940 struct btrace_data btrace;
1941 struct regcache *regcache;
1942 CORE_ADDR pc;
1943
1944 regcache = get_thread_regcache (tp);
1945 pc = regcache_read_pc (regcache);
1946
1947 btrace.format = BTRACE_FORMAT_BTS;
1948 btrace.variant.bts.blocks = new std::vector<btrace_block>;
1949
1950 btrace.variant.bts.blocks->emplace_back (pc, pc);
1951
1952 btrace_compute_ftrace (tp, &btrace, NULL);
1953 }
1954
1955 /* See btrace.h. */
1956
1957 void
1958 btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
1959 {
1960 if (tp->btrace.target != NULL)
1961 error (_("Recording already enabled on thread %s (%s)."),
1962 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
1963
1964 #if !defined (HAVE_LIBIPT)
1965 if (conf->format == BTRACE_FORMAT_PT)
1966 error (_("Intel Processor Trace support was disabled at compile time."));
1967 #endif /* !defined (HAVE_LIBIPT) */
1968
1969 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1970 tp->ptid.to_string ().c_str ());
1971
1972 tp->btrace.target = target_enable_btrace (tp, conf);
1973
1974 if (tp->btrace.target == NULL)
1975 error (_("Failed to enable recording on thread %s (%s)."),
1976 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
1977
1978 /* We need to undo the enable in case of errors. */
1979 try
1980 {
1981 /* Add an entry for the current PC so we start tracing from where we
1982 enabled it.
1983
1984 If we can't access TP's registers, TP is most likely running. In this
1985 case, we can't really say where tracing was enabled so it should be
1986 safe to simply skip this step.
1987
1988 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1989 start at the PC at which tracing was enabled. */
1990 if (conf->format != BTRACE_FORMAT_PT
1991 && can_access_registers_thread (tp))
1992 btrace_add_pc (tp);
1993 }
1994 catch (const gdb_exception &exception)
1995 {
1996 btrace_disable (tp);
1997
1998 throw;
1999 }
2000 }
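
/* A minimal usage sketch (illustrative only; not code from this file): a
   client that wants branch tracing on the current thread could do

     thread_info *tp = inferior_thread ();
     struct btrace_config conf {};

     conf.format = BTRACE_FORMAT_BTS;
     btrace_enable (tp, &conf);
     ...
     btrace_fetch (tp, nullptr);
     ...
     btrace_disable (tp);

   Whether a given format is actually available depends on the target;
   btrace_enable signals an error if recording cannot be enabled.  */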
2001
2002 /* See btrace.h. */
2003
2004 const struct btrace_config *
2005 btrace_conf (const struct btrace_thread_info *btinfo)
2006 {
2007 if (btinfo->target == NULL)
2008 return NULL;
2009
2010 return target_btrace_conf (btinfo->target);
2011 }
2012
2013 /* See btrace.h. */
2014
2015 void
2016 btrace_disable (struct thread_info *tp)
2017 {
2018 struct btrace_thread_info *btp = &tp->btrace;
2019
2020 if (btp->target == NULL)
2021 error (_("Recording not enabled on thread %s (%s)."),
2022 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
2023
2024 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
2025 tp->ptid.to_string ().c_str ());
2026
2027 target_disable_btrace (btp->target);
2028 btp->target = NULL;
2029
2030 btrace_clear (tp);
2031 }
2032
2033 /* See btrace.h. */
2034
2035 void
2036 btrace_teardown (struct thread_info *tp)
2037 {
2038 struct btrace_thread_info *btp = &tp->btrace;
2039
2040 if (btp->target == NULL)
2041 return;
2042
2043 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
2044 tp->ptid.to_string ().c_str ());
2045
2046 target_teardown_btrace (btp->target);
2047 btp->target = NULL;
2048
2049 btrace_clear (tp);
2050 }
2051
2052 /* Stitch branch trace in BTS format. */
2053
2054 static int
2055 btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
2056 {
2057 struct btrace_thread_info *btinfo;
2058 struct btrace_function *last_bfun;
2059 btrace_block *first_new_block;
2060
2061 btinfo = &tp->btrace;
2062 gdb_assert (!btinfo->functions.empty ());
2063 gdb_assert (!btrace->blocks->empty ());
2064
2065 last_bfun = &btinfo->functions.back ();
2066
2067 /* If the existing trace ends with a gap, we just glue the traces
2068 together. We need to drop the last (i.e. chronologically first) block
2069 of the new trace, though, since we can't fill in the start address. */
2070 if (last_bfun->insn.empty ())
2071 {
2072 btrace->blocks->pop_back ();
2073 return 0;
2074 }
2075
2076 /* Beware that block trace starts with the most recent block, so the
2077 chronologically first block in the new trace is the last block in
2078 the new trace's block vector. */
2079 first_new_block = &btrace->blocks->back ();
2080 const btrace_insn &last_insn = last_bfun->insn.back ();
2081
2082 /* If the current PC at the end of the block is the same as in our current
2083 trace, there are two explanations:
2084 1. we executed the instruction and some branch brought us back.
2085 2. we have not made any progress.
2086 In the first case, the delta trace vector should contain at least two
2087 entries.
2088 In the second case, the delta trace vector should contain exactly one
2089 entry for the partial block containing the current PC. Remove it. */
2090 if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
2091 {
2092 btrace->blocks->pop_back ();
2093 return 0;
2094 }
2095
2096 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
2097 core_addr_to_string_nz (first_new_block->end));
2098
2099 /* Do a simple sanity check to make sure we don't accidentally end up
2100 with a bad block. This should not occur in practice. */
2101 if (first_new_block->end < last_insn.pc)
2102 {
2103 warning (_("Error while trying to read delta trace. Falling back to "
2104 "a full read."));
2105 return -1;
2106 }
2107
2108 /* We adjust the last block to start at the end of our current trace. */
2109 gdb_assert (first_new_block->begin == 0);
2110 first_new_block->begin = last_insn.pc;
2111
2112 /* We simply pop the last insn so we can insert it again as part of
2113 the normal branch trace computation.
2114 Since instruction iterators are based on indices in the instructions
2115 vector, we don't leave any pointers dangling. */
2116 DEBUG ("pruning insn at %s for stitching",
2117 ftrace_print_insn_addr (&last_insn));
2118
2119 last_bfun->insn.pop_back ();
2120
2121 /* The instructions vector may become empty temporarily if this has
2122 been the only instruction in this function segment.
2123 This violates the invariant but will be remedied shortly by
2124 btrace_compute_ftrace when we add the new trace. */
2125
2126 /* The only case where this would hurt is if the entire trace consisted
2127 of just that one instruction. If we remove it, we might turn the now
2128 empty btrace function segment into a gap. But we don't want gaps at
2129 the beginning. To avoid this, we remove the entire old trace. */
2130 if (last_bfun->number == 1 && last_bfun->insn.empty ())
2131 btrace_clear (tp);
2132
2133 return 0;
2134 }
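
/* An illustrative example with made-up addresses: say the existing trace
   ends with an instruction at 0x4005f0 and a delta read returns the block
   vector [{0x400700, 0x400720}, {0x0, 0x400610}].  The last element is the
   chronologically first new block; its start address is unknown.  Stitching
   sets that block's begin to 0x4005f0 and pops the instruction at 0x4005f0
   from the old trace, so that btrace_compute_ftrace later decodes the
   adjusted block [0x4005f0, 0x400610] and re-adds the popped instruction as
   part of the new trace.  */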
2135
2136 /* Adjust the block trace in order to stitch old and new trace together.
2137 BTRACE is the new delta trace between the last and the current stop.
2138 TP is the traced thread.
2139 May modify BTRACE as well as the existing trace in TP.
2140 Return 0 on success, -1 otherwise. */
2141
2142 static int
2143 btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
2144 {
2145 /* If we don't have trace, there's nothing to do. */
2146 if (btrace->empty ())
2147 return 0;
2148
2149 switch (btrace->format)
2150 {
2151 case BTRACE_FORMAT_NONE:
2152 return 0;
2153
2154 case BTRACE_FORMAT_BTS:
2155 return btrace_stitch_bts (&btrace->variant.bts, tp);
2156
2157 case BTRACE_FORMAT_PT:
2158 /* Delta reads are not supported. */
2159 return -1;
2160 }
2161
2162 internal_error (_("Unknown branch trace format."));
2163 }
2164
2165 /* Clear the branch trace histories in BTINFO. */
2166
2167 static void
2168 btrace_clear_history (struct btrace_thread_info *btinfo)
2169 {
2170 xfree (btinfo->insn_history);
2171 xfree (btinfo->call_history);
2172 xfree (btinfo->replay);
2173
2174 btinfo->insn_history = NULL;
2175 btinfo->call_history = NULL;
2176 btinfo->replay = NULL;
2177
2178 btinfo->aux_data.clear ();
2179 }
2180
2181 /* Clear the branch trace maintenance histories in BTINFO. */
2182
2183 static void
2184 btrace_maint_clear (struct btrace_thread_info *btinfo)
2185 {
2186 switch (btinfo->data.format)
2187 {
2188 default:
2189 break;
2190
2191 case BTRACE_FORMAT_BTS:
2192 btinfo->maint.variant.bts.packet_history.begin = 0;
2193 btinfo->maint.variant.bts.packet_history.end = 0;
2194 break;
2195
2196 #if defined (HAVE_LIBIPT)
2197 case BTRACE_FORMAT_PT:
2198 delete btinfo->maint.variant.pt.packets;
2199
2200 btinfo->maint.variant.pt.packets = NULL;
2201 btinfo->maint.variant.pt.packet_history.begin = 0;
2202 btinfo->maint.variant.pt.packet_history.end = 0;
2203 break;
2204 #endif /* defined (HAVE_LIBIPT) */
2205 }
2206 }
2207
2208 /* See btrace.h. */
2209
2210 const char *
2211 btrace_decode_error (enum btrace_format format, int errcode)
2212 {
2213 switch (format)
2214 {
2215 case BTRACE_FORMAT_BTS:
2216 switch (errcode)
2217 {
2218 case BDE_BTS_OVERFLOW:
2219 return _("instruction overflow");
2220
2221 case BDE_BTS_INSN_SIZE:
2222 return _("unknown instruction");
2223
2224 default:
2225 break;
2226 }
2227 break;
2228
2229 #if defined (HAVE_LIBIPT)
2230 case BTRACE_FORMAT_PT:
2231 switch (errcode)
2232 {
2233 case BDE_PT_USER_QUIT:
2234 return _("trace decode cancelled");
2235
2236 case BDE_PT_NON_CONTIGUOUS:
2237 return _("non-contiguous");
2238
2239 case BDE_PT_OVERFLOW:
2240 return _("overflow");
2241
2242 default:
2243 if (errcode < 0)
2244 return pt_errstr (pt_errcode (errcode));
2245 break;
2246 }
2247 break;
2248 #endif /* defined (HAVE_LIBIPT) */
2249
2250 default:
2251 break;
2252 }
2253
2254 return _("unknown");
2255 }
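
/* Illustrative use (a sketch, not taken from this file): a consumer that
   encounters a gap segment BFUN in a trace of format FMT could report it as

     warning (_("Decode error: %s."),
	      btrace_decode_error (fmt, bfun->errcode));

   The returned string is statically allocated; callers must not free it.  */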
2256
2257 /* See btrace.h. */
2258
2259 void
2260 btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
2261 {
2262 struct btrace_thread_info *btinfo;
2263 struct btrace_target_info *tinfo;
2264 struct btrace_data btrace;
2265 int errcode;
2266
2267 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
2268 tp->ptid.to_string ().c_str ());
2269
2270 btinfo = &tp->btrace;
2271 tinfo = btinfo->target;
2272 if (tinfo == NULL)
2273 return;
2274
2275 /* There's no way we could get new trace while replaying.
2276 On the other hand, delta trace would return a partial record with the
2277 current PC, which is the replay PC, not the last PC, as expected. */
2278 if (btinfo->replay != NULL)
2279 return;
2280
2281 /* With CLI usage, TP is always the current thread when we get here.
2282 However, since we can also store a gdb.Record object in Python
2283 referring to a different thread than the current one, we need to
2284 temporarily set the current thread. */
2285 scoped_restore_current_thread restore_thread;
2286 switch_to_thread (tp);
2287
2288 /* We should not be called on running or exited threads. */
2289 gdb_assert (can_access_registers_thread (tp));
2290
2291 /* Let's first try to extend the trace we already have. */
2292 if (!btinfo->functions.empty ())
2293 {
2294 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
2295 if (errcode == 0)
2296 {
2297 /* Success. Let's try to stitch the traces together. */
2298 errcode = btrace_stitch_trace (&btrace, tp);
2299 }
2300 else
2301 {
2302 /* We failed to read delta trace. Let's try to read new trace. */
2303 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
2304
2305 /* If we got any new trace, discard what we have. */
2306 if (errcode == 0 && !btrace.empty ())
2307 btrace_clear (tp);
2308 }
2309
2310 /* If we were not able to read the trace, we start over. */
2311 if (errcode != 0)
2312 {
2313 btrace_clear (tp);
2314 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
2315 }
2316 }
2317 else
2318 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
2319
2320 /* If we were not able to read the branch trace, signal an error. */
2321 if (errcode != 0)
2322 error (_("Failed to read branch trace."));
2323
2324 /* Compute the trace, provided we have any. */
2325 if (!btrace.empty ())
2326 {
2327 /* Store the raw trace data. The stored data will be cleared in
2328 btrace_clear, so we always append the new trace. */
2329 btrace_data_append (&btinfo->data, &btrace);
2330 btrace_maint_clear (btinfo);
2331
2332 btrace_clear_history (btinfo);
2333 btrace_compute_ftrace (tp, &btrace, cpu);
2334 }
2335 }
2336
2337 /* See btrace.h. */
2338
2339 void
2340 btrace_clear (struct thread_info *tp)
2341 {
2342 struct btrace_thread_info *btinfo;
2343
2344 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
2345 tp->ptid.to_string ().c_str ());
2346
2347 /* Make sure btrace frames that may hold a pointer into the branch
2348 trace data are destroyed. */
2349 reinit_frame_cache ();
2350
2351 btinfo = &tp->btrace;
2352
2353 btinfo->functions.clear ();
2354 btinfo->ngaps = 0;
2355
2356 /* Must clear the maint data before - it depends on BTINFO->DATA. */
2357 btrace_maint_clear (btinfo);
2358 btinfo->data.clear ();
2359 btrace_clear_history (btinfo);
2360 }
2361
2362 /* See btrace.h. */
2363
2364 void
2365 btrace_free_objfile (struct objfile *objfile)
2366 {
2367 DEBUG ("free objfile");
2368
2369 for (thread_info *tp : all_non_exited_threads ())
2370 btrace_clear (tp);
2371 }
2372
2373 /* See btrace.h. */
2374
2375 const struct btrace_insn *
2376 btrace_insn_get (const struct btrace_insn_iterator *it)
2377 {
2378 const struct btrace_function *bfun;
2379 unsigned int index, end;
2380
2381 index = it->insn_index;
2382 bfun = &it->btinfo->functions[it->call_index];
2383
2384 /* Check if the iterator points to a gap in the trace. */
2385 if (bfun->errcode != 0)
2386 return NULL;
2387
2388 /* The index is within the bounds of this function's instruction vector. */
2389 end = bfun->insn.size ();
2390 gdb_assert (0 < end);
2391 gdb_assert (index < end);
2392
2393 return &bfun->insn[index];
2394 }
2395
2396 /* See btrace.h. */
2397
2398 int
2399 btrace_insn_get_error (const struct btrace_insn_iterator *it)
2400 {
2401 return it->btinfo->functions[it->call_index].errcode;
2402 }
2403
2404 /* See btrace.h. */
2405
2406 unsigned int
2407 btrace_insn_number (const struct btrace_insn_iterator *it)
2408 {
2409 return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
2410 }
2411
2412 /* See btrace.h. */
2413
2414 void
2415 btrace_insn_begin (struct btrace_insn_iterator *it,
2416 const struct btrace_thread_info *btinfo)
2417 {
2418 if (btinfo->functions.empty ())
2419 error (_("No trace."));
2420
2421 it->btinfo = btinfo;
2422 it->call_index = 0;
2423 it->insn_index = 0;
2424 }
2425
2426 /* See btrace.h. */
2427
2428 void
2429 btrace_insn_end (struct btrace_insn_iterator *it,
2430 const struct btrace_thread_info *btinfo)
2431 {
2432 const struct btrace_function *bfun;
2433 unsigned int length;
2434
2435 if (btinfo->functions.empty ())
2436 error (_("No trace."));
2437
2438 bfun = &btinfo->functions.back ();
2439 length = bfun->insn.size ();
2440
2441 /* The last function may either be a gap or it contains the current
2442 instruction, which is one past the end of the execution trace; ignore
2443 it. */
2444 if (length > 0)
2445 length -= 1;
2446
2447 it->btinfo = btinfo;
2448 it->call_index = bfun->number - 1;
2449 it->insn_index = length;
2450 }
2451
2452 /* See btrace.h. */
2453
2454 unsigned int
2455 btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2456 {
2457 const struct btrace_function *bfun;
2458 unsigned int index, steps;
2459
2460 bfun = &it->btinfo->functions[it->call_index];
2461 steps = 0;
2462 index = it->insn_index;
2463
2464 while (stride != 0)
2465 {
2466 unsigned int end, space, adv;
2467
2468 end = bfun->insn.size ();
2469
2470 /* An empty function segment represents a gap in the trace. We count
2471 it as one instruction. */
2472 if (end == 0)
2473 {
2474 const struct btrace_function *next;
2475
2476 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2477 if (next == NULL)
2478 break;
2479
2480 stride -= 1;
2481 steps += 1;
2482
2483 bfun = next;
2484 index = 0;
2485
2486 continue;
2487 }
2488
2489 gdb_assert (0 < end);
2490 gdb_assert (index < end);
2491
2492 /* Compute the number of instructions remaining in this segment. */
2493 space = end - index;
2494
2495 /* Advance the iterator as far as possible within this segment. */
2496 adv = std::min (space, stride);
2497 stride -= adv;
2498 index += adv;
2499 steps += adv;
2500
2501 /* Move to the next function if we're at the end of this one. */
2502 if (index == end)
2503 {
2504 const struct btrace_function *next;
2505
2506 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
2507 if (next == NULL)
2508 {
2509 /* We stepped past the last function.
2510
2511 Let's adjust the index to point to the last instruction in
2512 the previous function. */
2513 index -= 1;
2514 steps -= 1;
2515 break;
2516 }
2517
2518 /* We now point to the first instruction in the new function. */
2519 bfun = next;
2520 index = 0;
2521 }
2522
2523 /* We did make progress. */
2524 gdb_assert (adv > 0);
2525 }
2526
2527 /* Update the iterator. */
2528 it->call_index = bfun->number - 1;
2529 it->insn_index = index;
2530
2531 return steps;
2532 }
2533
2534 /* See btrace.h. */
2535
2536 unsigned int
2537 btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2538 {
2539 const struct btrace_function *bfun;
2540 unsigned int index, steps;
2541
2542 bfun = &it->btinfo->functions[it->call_index];
2543 steps = 0;
2544 index = it->insn_index;
2545
2546 while (stride != 0)
2547 {
2548 unsigned int adv;
2549
2550 /* Move to the previous function if we're at the start of this one. */
2551 if (index == 0)
2552 {
2553 const struct btrace_function *prev;
2554
2555 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
2556 if (prev == NULL)
2557 break;
2558
2559 /* We point to one after the last instruction in the new function. */
2560 bfun = prev;
2561 index = bfun->insn.size ();
2562
2563 /* An empty function segment represents a gap in the trace. We count
2564 it as one instruction. */
2565 if (index == 0)
2566 {
2567 stride -= 1;
2568 steps += 1;
2569
2570 continue;
2571 }
2572 }
2573
2574 /* Advance the iterator as far as possible within this segment. */
2575 adv = std::min (index, stride);
2576
2577 stride -= adv;
2578 index -= adv;
2579 steps += adv;
2580
2581 /* We did make progress. */
2582 gdb_assert (adv > 0);
2583 }
2584
2585 /* Update the iterator. */
2586 it->call_index = bfun->number - 1;
2587 it->insn_index = index;
2588
2589 return steps;
2590 }
2591
2592 /* See btrace.h. */
2593
2594 int
2595 btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2596 const struct btrace_insn_iterator *rhs)
2597 {
2598 gdb_assert (lhs->btinfo == rhs->btinfo);
2599
2600 if (lhs->call_index != rhs->call_index)
2601 return lhs->call_index - rhs->call_index;
2602
2603 return lhs->insn_index - rhs->insn_index;
2604 }
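
/* A minimal iteration sketch (illustrative only): once a thread TP's trace
   has been fetched, its instruction history can be walked with the iterator
   functions above, counting gaps separately:

     struct btrace_insn_iterator it, end;
     unsigned int insns = 0, gaps = 0;

     btrace_insn_begin (&it, &tp->btrace);
     btrace_insn_end (&end, &tp->btrace);

     while (btrace_insn_cmp (&it, &end) < 0)
       {
	 if (btrace_insn_get (&it) == NULL)
	   gaps += 1;		// The iterator points into a gap.
	 else
	   insns += 1;

	 if (btrace_insn_next (&it, 1) == 0)
	   break;		// Could not advance any further.
       }

   Both btrace_insn_begin and btrace_insn_end signal an error if there is no
   trace at all, so callers should check for that first.  */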
2605
2606 /* See btrace.h. */
2607
2608 int
2609 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2610 const struct btrace_thread_info *btinfo,
2611 unsigned int number)
2612 {
2613 const struct btrace_function *bfun;
2614 unsigned int upper, lower;
2615
2616 if (btinfo->functions.empty ())
2617 return 0;
2618
2619 lower = 0;
2620 bfun = &btinfo->functions[lower];
2621 if (number < bfun->insn_offset)
2622 return 0;
2623
2624 upper = btinfo->functions.size () - 1;
2625 bfun = &btinfo->functions[upper];
2626 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2627 return 0;
2628
2629 /* We assume that there are no holes in the numbering. */
2630 for (;;)
2631 {
2632 const unsigned int average = lower + (upper - lower) / 2;
2633
2634 bfun = &btinfo->functions[average];
2635
2636 if (number < bfun->insn_offset)
2637 {
2638 upper = average - 1;
2639 continue;
2640 }
2641
2642 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2643 {
2644 lower = average + 1;
2645 continue;
2646 }
2647
2648 break;
2649 }
2650
2651 it->btinfo = btinfo;
2652 it->call_index = bfun->number - 1;
2653 it->insn_index = number - bfun->insn_offset;
2654 return 1;
2655 }
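
/* A worked example with made-up numbers: assume three function segments
   whose (insn_offset, instruction count) pairs are (1, 3), (4, 5) and
   (9, 2), covering instruction numbers 1..3, 4..8 and 9..10.  Looking up
   NUMBER == 6 binary-searches to the middle segment and yields
   insn_index == 6 - 4 == 2, while looking up 11 fails the initial range
   check and returns zero.  */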
2656
2657 /* Returns true if the recording ends with a function segment that
2658 contains only a single (i.e. the current) instruction. */
2659
2660 static bool
2661 btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2662 {
2663 const btrace_function *bfun;
2664
2665 if (btinfo->functions.empty ())
2666 return false;
2667
2668 bfun = &btinfo->functions.back ();
2669 if (bfun->errcode != 0)
2670 return false;
2671
2672 return ftrace_call_num_insn (bfun) == 1;
2673 }
2674
2675 /* See btrace.h. */
2676
2677 const struct btrace_function *
2678 btrace_call_get (const struct btrace_call_iterator *it)
2679 {
2680 if (it->index >= it->btinfo->functions.size ())
2681 return NULL;
2682
2683 return &it->btinfo->functions[it->index];
2684 }
2685
2686 /* See btrace.h. */
2687
2688 unsigned int
2689 btrace_call_number (const struct btrace_call_iterator *it)
2690 {
2691 const unsigned int length = it->btinfo->functions.size ();
2692
2693 /* If the last function segment contains only a single instruction (i.e. the
2694 current instruction), skip it. */
2695 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2696 return length;
2697
2698 return it->index + 1;
2699 }
2700
2701 /* See btrace.h. */
2702
2703 void
2704 btrace_call_begin (struct btrace_call_iterator *it,
2705 const struct btrace_thread_info *btinfo)
2706 {
2707 if (btinfo->functions.empty ())
2708 error (_("No trace."));
2709
2710 it->btinfo = btinfo;
2711 it->index = 0;
2712 }
2713
2714 /* See btrace.h. */
2715
2716 void
2717 btrace_call_end (struct btrace_call_iterator *it,
2718 const struct btrace_thread_info *btinfo)
2719 {
2720 if (btinfo->functions.empty ())
2721 error (_("No trace."));
2722
2723 it->btinfo = btinfo;
2724 it->index = btinfo->functions.size ();
2725 }
2726
2727 /* See btrace.h. */
2728
2729 unsigned int
2730 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2731 {
2732 const unsigned int length = it->btinfo->functions.size ();
2733
2734 if (it->index + stride < length - 1)
2735 /* Default case: Simply advance the iterator. */
2736 it->index += stride;
2737 else if (it->index + stride == length - 1)
2738 {
2739 /* We land exactly at the last function segment. If it contains only one
2740 instruction (i.e. the current instruction) it is not actually part of
2741 the trace. */
2742 if (btrace_ends_with_single_insn (it->btinfo))
2743 it->index = length;
2744 else
2745 it->index = length - 1;
2746 }
2747 else
2748 {
2749 /* We land past the last function segment and have to adjust the stride.
2750 If the last function segment contains only one instruction (i.e. the
2751 current instruction) it is not actually part of the trace. */
2752 if (btrace_ends_with_single_insn (it->btinfo))
2753 stride = length - it->index - 1;
2754 else
2755 stride = length - it->index;
2756
2757 it->index = length;
2758 }
2759
2760 return stride;
2761 }
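
/* A short example with made-up numbers: with five recorded function segments
   where the last one holds only the current instruction, advancing an
   iterator at index 3 by a stride of 1 lands exactly on that last segment.
   Since it is not really part of the trace, the index is bumped to 5 (the
   end position) and the full stride of 1 is still reported.  */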
2762
2763 /* See btrace.h. */
2764
2765 unsigned int
2766 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2767 {
2768 const unsigned int length = it->btinfo->functions.size ();
2769 int steps = 0;
2770
2771 gdb_assert (it->index <= length);
2772
2773 if (stride == 0 || it->index == 0)
2774 return 0;
2775
2776 /* If we are at the end, the first step is a special case. If the last
2777 function segment contains only one instruction (i.e. the current
2778 instruction) it is not actually part of the trace. To be able to step
2779 over this instruction, we need at least one more function segment. */
2780 if ((it->index == length) && (length > 1))
2781 {
2782 if (btrace_ends_with_single_insn (it->btinfo))
2783 it->index = length - 2;
2784 else
2785 it->index = length - 1;
2786
2787 steps = 1;
2788 stride -= 1;
2789 }
2790
2791 stride = std::min (stride, it->index);
2792
2793 it->index -= stride;
2794 return steps + stride;
2795 }
2796
2797 /* See btrace.h. */
2798
2799 int
2800 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2801 const struct btrace_call_iterator *rhs)
2802 {
2803 gdb_assert (lhs->btinfo == rhs->btinfo);
2804 return (int) (lhs->index - rhs->index);
2805 }
2806
2807 /* See btrace.h. */
2808
2809 int
2810 btrace_find_call_by_number (struct btrace_call_iterator *it,
2811 const struct btrace_thread_info *btinfo,
2812 unsigned int number)
2813 {
2814 const unsigned int length = btinfo->functions.size ();
2815
2816 if ((number == 0) || (number > length))
2817 return 0;
2818
2819 it->btinfo = btinfo;
2820 it->index = number - 1;
2821 return 1;
2822 }
2823
2824 /* See btrace.h. */
2825
2826 void
2827 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2828 const struct btrace_insn_iterator *begin,
2829 const struct btrace_insn_iterator *end)
2830 {
2831 if (btinfo->insn_history == NULL)
2832 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2833
2834 btinfo->insn_history->begin = *begin;
2835 btinfo->insn_history->end = *end;
2836 }
2837
2838 /* See btrace.h. */
2839
2840 void
2841 btrace_set_call_history (struct btrace_thread_info *btinfo,
2842 const struct btrace_call_iterator *begin,
2843 const struct btrace_call_iterator *end)
2844 {
2845 gdb_assert (begin->btinfo == end->btinfo);
2846
2847 if (btinfo->call_history == NULL)
2848 btinfo->call_history = XCNEW (struct btrace_call_history);
2849
2850 btinfo->call_history->begin = *begin;
2851 btinfo->call_history->end = *end;
2852 }
2853
2854 /* See btrace.h. */
2855
2856 int
2857 btrace_is_replaying (struct thread_info *tp)
2858 {
2859 return tp->btrace.replay != NULL;
2860 }
2861
2862 /* See btrace.h. */
2863
2864 int
2865 btrace_is_empty (struct thread_info *tp)
2866 {
2867 struct btrace_insn_iterator begin, end;
2868 struct btrace_thread_info *btinfo;
2869
2870 btinfo = &tp->btrace;
2871
2872 if (btinfo->functions.empty ())
2873 return 1;
2874
2875 btrace_insn_begin (&begin, btinfo);
2876 btrace_insn_end (&end, btinfo);
2877
2878 return btrace_insn_cmp (&begin, &end) == 0;
2879 }
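
/* For example, a trace consisting of a single function segment that holds
   just the current instruction is considered empty: btrace_insn_begin yields
   (call_index 0, insn_index 0) and btrace_insn_end backs up over the current
   instruction to the very same position, so the two iterators compare
   equal.  */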
2880
2881 #if defined (HAVE_LIBIPT)
2882
2883 /* Print a single packet. */
2884
2885 static void
2886 pt_print_packet (const struct pt_packet *packet)
2887 {
2888 switch (packet->type)
2889 {
2890 default:
2891 gdb_printf (("[??: %x]"), packet->type);
2892 break;
2893
2894 case ppt_psb:
2895 gdb_printf (("psb"));
2896 break;
2897
2898 case ppt_psbend:
2899 gdb_printf (("psbend"));
2900 break;
2901
2902 case ppt_pad:
2903 gdb_printf (("pad"));
2904 break;
2905
2906 case ppt_tip:
2907 gdb_printf (("tip %u: 0x%" PRIx64 ""),
2908 packet->payload.ip.ipc,
2909 packet->payload.ip.ip);
2910 break;
2911
2912 case ppt_tip_pge:
2913 gdb_printf (("tip.pge %u: 0x%" PRIx64 ""),
2914 packet->payload.ip.ipc,
2915 packet->payload.ip.ip);
2916 break;
2917
2918 case ppt_tip_pgd:
2919 gdb_printf (("tip.pgd %u: 0x%" PRIx64 ""),
2920 packet->payload.ip.ipc,
2921 packet->payload.ip.ip);
2922 break;
2923
2924 case ppt_fup:
2925 gdb_printf (("fup %u: 0x%" PRIx64 ""),
2926 packet->payload.ip.ipc,
2927 packet->payload.ip.ip);
2928 break;
2929
2930 case ppt_tnt_8:
2931 gdb_printf (("tnt-8 %u: 0x%" PRIx64 ""),
2932 packet->payload.tnt.bit_size,
2933 packet->payload.tnt.payload);
2934 break;
2935
2936 case ppt_tnt_64:
2937 gdb_printf (("tnt-64 %u: 0x%" PRIx64 ""),
2938 packet->payload.tnt.bit_size,
2939 packet->payload.tnt.payload);
2940 break;
2941
2942 case ppt_pip:
2943 gdb_printf (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2944 packet->payload.pip.nr ? (" nr") : (""));
2945 break;
2946
2947 case ppt_tsc:
2948 gdb_printf (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2949 break;
2950
2951 case ppt_cbr:
2952 gdb_printf (("cbr %u"), packet->payload.cbr.ratio);
2953 break;
2954
2955 case ppt_mode:
2956 switch (packet->payload.mode.leaf)
2957 {
2958 default:
2959 gdb_printf (("mode %u"), packet->payload.mode.leaf);
2960 break;
2961
2962 case pt_mol_exec:
2963 gdb_printf (("mode.exec%s%s"),
2964 packet->payload.mode.bits.exec.csl
2965 ? (" cs.l") : (""),
2966 packet->payload.mode.bits.exec.csd
2967 ? (" cs.d") : (""));
2968 break;
2969
2970 case pt_mol_tsx:
2971 gdb_printf (("mode.tsx%s%s"),
2972 packet->payload.mode.bits.tsx.intx
2973 ? (" intx") : (""),
2974 packet->payload.mode.bits.tsx.abrt
2975 ? (" abrt") : (""));
2976 break;
2977 }
2978 break;
2979
2980 case ppt_ovf:
2981 gdb_printf (("ovf"));
2982 break;
2983
2984 case ppt_stop:
2985 gdb_printf (("stop"));
2986 break;
2987
2988 case ppt_vmcs:
2989 gdb_printf (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2990 break;
2991
2992 case ppt_tma:
2993 gdb_printf (("tma %x %x"), packet->payload.tma.ctc,
2994 packet->payload.tma.fc);
2995 break;
2996
2997 case ppt_mtc:
2998 gdb_printf (("mtc %x"), packet->payload.mtc.ctc);
2999 break;
3000
3001 case ppt_cyc:
3002 gdb_printf (("cyc %" PRIx64 ""), packet->payload.cyc.value);
3003 break;
3004
3005 case ppt_mnt:
3006 gdb_printf (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
3007 break;
3008
3009 #if (LIBIPT_VERSION >= 0x200)
3010 case ppt_ptw:
3011 gdb_printf (("ptw %u: 0x%" PRIx64 "%s"), packet->payload.ptw.plc,
3012 packet->payload.ptw.payload,
3013 packet->payload.ptw.ip ? (" ip") : (""));
3014 break;
3015 #endif /* defined (LIBIPT_VERSION >= 0x200) */
3016
3017 #if (LIBIPT_VERSION >= 0x201)
3018 case ppt_cfe:
3019 gdb_printf (("cfe %u: 0x%x%s"), packet->payload.cfe.type,
3020 packet->payload.cfe.vector,
3021 packet->payload.cfe.ip ? (" ip") : (""));
3022 break;
3023
3024 case ppt_evd:
3025 gdb_printf (("evd %u: 0x%" PRIx64 ""), packet->payload.evd.type,
3026 packet->payload.evd.payload);
3027 break;
3028 #endif /* defined (LIBIPT_VERSION >= 0x201) */
3029 }
3030 }
3031
3032 /* Decode packets into MAINT using DECODER. */
3033
3034 static void
3035 btrace_maint_decode_pt (struct btrace_maint_info *maint,
3036 struct pt_packet_decoder *decoder)
3037 {
3038 int errcode;
3039
3040 if (maint->variant.pt.packets == NULL)
3041 maint->variant.pt.packets = new std::vector<btrace_pt_packet>;
3042
3043 for (;;)
3044 {
3045 struct btrace_pt_packet packet;
3046
3047 errcode = pt_pkt_sync_forward (decoder);
3048 if (errcode < 0)
3049 break;
3050
3051 for (;;)
3052 {
3053 pt_pkt_get_offset (decoder, &packet.offset);
3054
3055 errcode = pt_pkt_next (decoder, &packet.packet,
3056 sizeof (packet.packet));
3057 if (errcode < 0)
3058 break;
3059
3060 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
3061 {
3062 packet.errcode = pt_errcode (errcode);
3063 maint->variant.pt.packets->push_back (packet);
3064 }
3065 }
3066
3067 if (errcode == -pte_eos)
3068 break;
3069
3070 packet.errcode = pt_errcode (errcode);
3071 maint->variant.pt.packets->push_back (packet);
3072
3073 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
3074 packet.offset, pt_errstr (packet.errcode));
3075 }
3076
3077 if (errcode != -pte_eos)
3078 warning (_("Failed to synchronize onto the Intel Processor Trace "
3079 "stream: %s."), pt_errstr (pt_errcode (errcode)));
3080 }
3081
3082 /* Update the packet history in BTINFO. */
3083
3084 static void
3085 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
3086 {
3087 struct pt_packet_decoder *decoder;
3088 const struct btrace_cpu *cpu;
3089 struct btrace_data_pt *pt;
3090 struct pt_config config;
3091 int errcode;
3092
3093 pt = &btinfo->data.variant.pt;
3094
3095 /* Nothing to do if there is no trace. */
3096 if (pt->size == 0)
3097 return;
3098
3099 memset (&config, 0, sizeof (config));
3100
3101 config.size = sizeof (config);
3102 config.begin = pt->data;
3103 config.end = pt->data + pt->size;
3104
3105 cpu = record_btrace_get_cpu ();
3106 if (cpu == nullptr)
3107 cpu = &pt->config.cpu;
3108
3109 /* We treat an unknown vendor as 'no errata'. */
3110 if (cpu->vendor != CV_UNKNOWN)
3111 {
3112 config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
3113 config.cpu.family = cpu->family;
3114 config.cpu.model = cpu->model;
3115 config.cpu.stepping = cpu->stepping;
3116
3117 errcode = pt_cpu_errata (&config.errata, &config.cpu);
3118 if (errcode < 0)
3119 error (_("Failed to configure the Intel Processor Trace "
3120 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
3121 }
3122
3123 decoder = pt_pkt_alloc_decoder (&config);
3124 if (decoder == NULL)
3125 error (_("Failed to allocate the Intel Processor Trace decoder."));
3126
3127 try
3128 {
3129 btrace_maint_decode_pt (&btinfo->maint, decoder);
3130 }
3131 catch (const gdb_exception &except)
3132 {
3133 pt_pkt_free_decoder (decoder);
3134
3135 if (except.reason < 0)
3136 throw;
3137 }
3138
3139 pt_pkt_free_decoder (decoder);
3140 }
3141
3142 #endif /* defined (HAVE_LIBIPT) */
3143
3144 /* Update the packet maintenance information for BTINFO and store the
3145 low and high bounds into BEGIN and END, respectively.
3146 Store the current iterator state into FROM and TO. */
3147
3148 static void
3149 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3150 unsigned int *begin, unsigned int *end,
3151 unsigned int *from, unsigned int *to)
3152 {
3153 switch (btinfo->data.format)
3154 {
3155 default:
3156 *begin = 0;
3157 *end = 0;
3158 *from = 0;
3159 *to = 0;
3160 break;
3161
3162 case BTRACE_FORMAT_BTS:
3163 /* Nothing to do - we operate directly on BTINFO->DATA. */
3164 *begin = 0;
3165 *end = btinfo->data.variant.bts.blocks->size ();
3166 *from = btinfo->maint.variant.bts.packet_history.begin;
3167 *to = btinfo->maint.variant.bts.packet_history.end;
3168 break;
3169
3170 #if defined (HAVE_LIBIPT)
3171 case BTRACE_FORMAT_PT:
3172 if (btinfo->maint.variant.pt.packets == nullptr)
3173 btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;
3174
3175 if (btinfo->maint.variant.pt.packets->empty ())
3176 btrace_maint_update_pt_packets (btinfo);
3177
3178 *begin = 0;
3179 *end = btinfo->maint.variant.pt.packets->size ();
3180 *from = btinfo->maint.variant.pt.packet_history.begin;
3181 *to = btinfo->maint.variant.pt.packet_history.end;
3182 break;
3183 #endif /* defined (HAVE_LIBIPT) */
3184 }
3185 }
3186
3187 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3188 update the current iterator position. */
3189
3190 static void
3191 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3192 unsigned int begin, unsigned int end)
3193 {
3194 switch (btinfo->data.format)
3195 {
3196 default:
3197 break;
3198
3199 case BTRACE_FORMAT_BTS:
3200 {
3201 const std::vector<btrace_block> &blocks
3202 = *btinfo->data.variant.bts.blocks;
3203 unsigned int blk;
3204
3205 for (blk = begin; blk < end; ++blk)
3206 {
3207 const btrace_block &block = blocks.at (blk);
3208
3209 gdb_printf ("%u\tbegin: %s, end: %s\n", blk,
3210 core_addr_to_string_nz (block.begin),
3211 core_addr_to_string_nz (block.end));
3212 }
3213
3214 btinfo->maint.variant.bts.packet_history.begin = begin;
3215 btinfo->maint.variant.bts.packet_history.end = end;
3216 }
3217 break;
3218
3219 #if defined (HAVE_LIBIPT)
3220 case BTRACE_FORMAT_PT:
3221 {
3222 const std::vector<btrace_pt_packet> &packets
3223 = *btinfo->maint.variant.pt.packets;
3224 unsigned int pkt;
3225
3226 for (pkt = begin; pkt < end; ++pkt)
3227 {
3228 const struct btrace_pt_packet &packet = packets.at (pkt);
3229
3230 gdb_printf ("%u\t", pkt);
3231 gdb_printf ("0x%" PRIx64 "\t", packet.offset);
3232
3233 if (packet.errcode == pte_ok)
3234 pt_print_packet (&packet.packet);
3235 else
3236 gdb_printf ("[error: %s]", pt_errstr (packet.errcode));
3237
3238 gdb_printf ("\n");
3239 }
3240
3241 btinfo->maint.variant.pt.packet_history.begin = begin;
3242 btinfo->maint.variant.pt.packet_history.end = end;
3243 }
3244 break;
3245 #endif /* defined (HAVE_LIBIPT) */
3246 }
3247 }
3248
3249 /* Read a number from an argument string. */
3250
3251 static unsigned int
3252 get_uint (const char **arg)
3253 {
3254 const char *begin, *pos;
3255 char *end;
3256 unsigned long number;
3257
3258 begin = *arg;
3259 pos = skip_spaces (begin);
3260
3261 if (!isdigit (*pos))
3262 error (_("Expected positive number, got: %s."), pos);
3263
3264 number = strtoul (pos, &end, 10);
3265 if (number > UINT_MAX)
3266 error (_("Number too big."));
3267
3268 *arg += (end - begin);
3269
3270 return (unsigned int) number;
3271 }
3272
3273 /* Read a context size from an argument string. */
3274
3275 static int
3276 get_context_size (const char **arg)
3277 {
3278 const char *pos = skip_spaces (*arg);
3279
3280 if (!isdigit (*pos))
3281 error (_("Expected positive number, got: %s."), pos);
3282
3283 char *end;
3284 long result = strtol (pos, &end, 10);
3285 *arg = end;
3286 return result;
3287 }
3288
3289 /* Complain about junk at the end of an argument string. */
3290
3291 static void
3292 no_chunk (const char *arg)
3293 {
3294 if (*arg != 0)
3295 error (_("Junk after argument: %s."), arg);
3296 }
3297
3298 /* The "maintenance btrace packet-history" command. */
3299
3300 static void
3301 maint_btrace_packet_history_cmd (const char *arg, int from_tty)
3302 {
3303 struct btrace_thread_info *btinfo;
3304 unsigned int size, begin, end, from, to;
3305
3306 thread_info *tp = current_inferior ()->find_thread (inferior_ptid);
3307 if (tp == NULL)
3308 error (_("No thread."));
3309
3310 size = 10;
3311 btinfo = &tp->btrace;
3312
3313 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3314 if (begin == end)
3315 {
3316 gdb_printf (_("No trace.\n"));
3317 return;
3318 }
3319
3320 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3321 {
3322 from = to;
3323
3324 if (end - from < size)
3325 size = end - from;
3326 to = from + size;
3327 }
3328 else if (strcmp (arg, "-") == 0)
3329 {
3330 to = from;
3331
3332 if (to - begin < size)
3333 size = to - begin;
3334 from = to - size;
3335 }
3336 else
3337 {
3338 from = get_uint (&arg);
3339 if (end <= from)
3340 error (_("'%u' is out of range."), from);
3341
3342 arg = skip_spaces (arg);
3343 if (*arg == ',')
3344 {
3345 arg = skip_spaces (++arg);
3346
3347 if (*arg == '+')
3348 {
3349 arg += 1;
3350 size = get_context_size (&arg);
3351
3352 no_chunk (arg);
3353
3354 if (end - from < size)
3355 size = end - from;
3356 to = from + size;
3357 }
3358 else if (*arg == '-')
3359 {
3360 arg += 1;
3361 size = get_context_size (&arg);
3362
3363 no_chunk (arg);
3364
3365 /* Include the packet given as first argument. */
3366 from += 1;
3367 to = from;
3368
3369 if (to - begin < size)
3370 size = to - begin;
3371 from = to - size;
3372 }
3373 else
3374 {
3375 to = get_uint (&arg);
3376
3377 /* Include the packet at the second argument and silently
3378 truncate the range. */
3379 if (to < end)
3380 to += 1;
3381 else
3382 to = end;
3383
3384 no_chunk (arg);
3385 }
3386 }
3387 else
3388 {
3389 no_chunk (arg);
3390
3391 if (end - from < size)
3392 size = end - from;
3393 to = from + size;
3394 }
3395
3396 dont_repeat ();
3397 }
3398
3399 btrace_maint_print_packets (btinfo, from, to);
3400 }
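
/* A few illustrative invocations of the command implemented above, assuming
   trace data has been recorded:

     maint btrace packet-history		prints the next ten packets
     maint btrace packet-history -		prints the previous ten packets
     maint btrace packet-history 42		prints ten packets starting at 42
     maint btrace packet-history 42,50		prints packets 42 through 50
     maint btrace packet-history 42,+5		prints five packets starting at 42
     maint btrace packet-history 42,-5		prints five packets ending at 42  */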
3401
3402 /* The "maintenance btrace clear-packet-history" command. */
3403
3404 static void
3405 maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
3406 {
3407 if (args != NULL && *args != 0)
3408 error (_("Invalid argument."));
3409
3410 if (inferior_ptid == null_ptid)
3411 error (_("No thread."));
3412
3413 thread_info *tp = inferior_thread ();
3414 btrace_thread_info *btinfo = &tp->btrace;
3415
3416 /* Must clear the maint data before - it depends on BTINFO->DATA. */
3417 btrace_maint_clear (btinfo);
3418 btinfo->data.clear ();
3419 }
3420
3421 /* The "maintenance btrace clear" command. */
3422
3423 static void
3424 maint_btrace_clear_cmd (const char *args, int from_tty)
3425 {
3426 if (args != NULL && *args != 0)
3427 error (_("Invalid argument."));
3428
3429 if (inferior_ptid == null_ptid)
3430 error (_("No thread."));
3431
3432 thread_info *tp = inferior_thread ();
3433 btrace_clear (tp);
3434 }
3435
3436 /* The "maintenance info btrace" command. */
3437
3438 static void
3439 maint_info_btrace_cmd (const char *args, int from_tty)
3440 {
3441 struct btrace_thread_info *btinfo;
3442 const struct btrace_config *conf;
3443
3444 if (args != NULL && *args != 0)
3445 error (_("Invalid argument."));
3446
3447 if (inferior_ptid == null_ptid)
3448 error (_("No thread."));
3449
3450 thread_info *tp = inferior_thread ();
3451
3452 btinfo = &tp->btrace;
3453
3454 conf = btrace_conf (btinfo);
3455 if (conf == NULL)
3456 error (_("No btrace configuration."));
3457
3458 gdb_printf (_("Format: %s.\n"),
3459 btrace_format_string (conf->format));
3460
3461 switch (conf->format)
3462 {
3463 default:
3464 break;
3465
3466 case BTRACE_FORMAT_BTS:
3467 gdb_printf (_("Number of packets: %zu.\n"),
3468 btinfo->data.variant.bts.blocks->size ());
3469 break;
3470
3471 #if defined (HAVE_LIBIPT)
3472 case BTRACE_FORMAT_PT:
3473 {
3474 struct pt_version version;
3475
3476 version = pt_library_version ();
3477 gdb_printf (_("Version: %u.%u.%u%s.\n"), version.major,
3478 version.minor, version.build,
3479 version.ext != NULL ? version.ext : "");
3480
3481 btrace_maint_update_pt_packets (btinfo);
3482 gdb_printf (_("Number of packets: %zu.\n"),
3483 ((btinfo->maint.variant.pt.packets == nullptr)
3484 ? 0 : btinfo->maint.variant.pt.packets->size ()));
3485 }
3486 break;
3487 #endif /* defined (HAVE_LIBIPT) */
3488 }
3489 }
3490
3491 /* The "maint show btrace pt skip-pad" show value function. */
3492
3493 static void
3494 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3495 struct cmd_list_element *c,
3496 const char *value)
3497 {
3498 gdb_printf (file, _("Skip PAD packets is %s.\n"), value);
3499 }
3500
3501
3502 /* Initialize btrace maintenance commands. */
3503
3504 INIT_GDB_FILE (btrace)
3505 {
3506 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3507 _("Info about branch tracing data."), &maintenanceinfolist);
3508
3509 add_basic_prefix_cmd ("btrace", class_maintenance,
3510 _("Branch tracing maintenance commands."),
3511 &maint_btrace_cmdlist, 0, &maintenancelist);
3512
3513 add_setshow_prefix_cmd ("btrace", class_maintenance,
3514 _("Set branch tracing specific variables."),
3515 _("Show branch tracing specific variables."),
3516 &maint_btrace_set_cmdlist,
3517 &maint_btrace_show_cmdlist,
3518 &maintenance_set_cmdlist,
3519 &maintenance_show_cmdlist);
3520
3521 add_setshow_prefix_cmd ("pt", class_maintenance,
3522 _("Set Intel Processor Trace specific variables."),
3523 _("Show Intel Processor Trace specific variables."),
3524 &maint_btrace_pt_set_cmdlist,
3525 &maint_btrace_pt_show_cmdlist,
3526 &maint_btrace_set_cmdlist,
3527 &maint_btrace_show_cmdlist);
3528
3529 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3530 &maint_btrace_pt_skip_pad, _("\
3531 Set whether PAD packets should be skipped in the btrace packet history."), _("\
3532 Show whether PAD packets should be skipped in the btrace packet history."), _("\
3533 When enabled, PAD packets are ignored in the btrace packet history."),
3534 NULL, show_maint_btrace_pt_skip_pad,
3535 &maint_btrace_pt_set_cmdlist,
3536 &maint_btrace_pt_show_cmdlist);
3537
3538 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3539 _("Print the raw branch tracing data.\n\
3540 With no argument, print ten more packets after the previous ten-line print.\n\
3541 With '-' as argument, print ten packets before a previous ten-line print.\n\
3542 One argument specifies the starting packet of a ten-line print.\n\
3543 Two arguments separated by a comma specify the starting and ending packets \
3544 to print.\n\
3545 Preceded with '+'/'-', the second argument specifies the distance from the \
3546 first."),
3547 &maint_btrace_cmdlist);
3548
3549 add_cmd ("clear-packet-history", class_maintenance,
3550 maint_btrace_clear_packet_history_cmd,
3551 _("Clears the branch tracing packet history.\n\
3552 Discards the raw branch tracing data but not the execution history data."),
3553 &maint_btrace_cmdlist);
3554
3555 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3556 _("Clears the branch tracing data.\n\
3557 Discards the raw branch tracing data and the execution history data.\n\
3558 The next 'record' command will fetch the branch tracing data anew."),
3559 &maint_btrace_cmdlist);
3560
3561 }