/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2023 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "regcache.h"
#include "gdbsupport/rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"
#include "gdbarch.h"

/* For maintenance commands.  */
#include "record-btrace.h"

#include <inttypes.h>
#include <ctype.h>
#include <algorithm>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static bool maint_btrace_pt_skip_pad = true;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	gdb_printf (gdb_stdlog, \
		    "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
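
/* Usage example (illustrative, not from the original file): with
   "set debug record 1" in effect, DEBUG ("enable thread %s",
   print_thread_id (tp)) prints a line like "[btrace] enable thread 1.1"
   to gdb_stdlog, and DEBUG_FTRACE adds a second prefix, e.g.
   "[btrace] [ftrace] new call".  */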

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();

  if (msym != NULL)
    return msym->print_name ();

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (sym->symtab ());
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + bfun->insn.size ();

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
		prefix, fun, file, level, ibegin, iend);
}

/* Return the number of instructions in a given function call segment.  */

static unsigned int
ftrace_call_num_insn (const struct btrace_function *bfun)
{
  if (bfun == NULL)
    return 0;

  /* A gap is always counted as one instruction.  */
  if (bfun->errcode != 0)
    return 1;

  return bfun->insn.size ();
}

/* Return the function segment with the given NUMBER or NULL if no such segment
   exists.  BTINFO is the branch trace information for the current thread.  */

static struct btrace_function *
ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* A const version of the function above.  */

static const struct btrace_function *
ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  if (number == 0 || number > btinfo->functions.size ())
    return NULL;

  return &btinfo->functions[number - 1];
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (mfun->linkage_name (), msym->linkage_name ()) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (fun->linkage_name (), sym->linkage_name ()) != 0)
	return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (sym->symtab ());
      fname = symtab_to_fullname (fun->symtab ());
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.
   This invalidates all struct btrace_function pointers currently held.  */

static struct btrace_function *
ftrace_new_function (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  int level;
  unsigned int number, insn_offset;

  if (btinfo->functions.empty ())
    {
      /* Start counting NUMBER and INSN_OFFSET at one.  */
      level = 0;
      number = 1;
      insn_offset = 1;
    }
  else
    {
      const struct btrace_function *prev = &btinfo->functions.back ();
      level = prev->level;
      number = prev->number + 1;
      insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
    }

  btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
  return &btinfo->functions.back ();
}
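
/* Note (illustrative, not from the original file): segment numbers are
   1-based indices into BTINFO->functions, so
   ftrace_find_call_by_number (btinfo, bfun->number) yields BFUN again.
   Since emplace_back above may reallocate the vector, callers must
   re-look-up segments by number rather than cache the returned
   pointer.  */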

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
		      struct btrace_function *caller,
		      btrace_function_flags flags)
{
  if (bfun->up != 0)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller->number;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
  ftrace_debug (caller, "..to");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
		     struct btrace_function *bfun,
		     struct btrace_function *caller,
		     btrace_function_flags flags)
{
  unsigned int prev, next;

  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}

/* Add a new function segment for a call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_thread_info *btinfo,
		 struct minimal_symbol *mfun,
		 struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_thread_info *btinfo,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  const unsigned int length = btinfo->functions.size ();
  struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);

  bfun->up = length;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Return the caller of BFUN or NULL if there is none.  This function skips
   tail calls in the call chain.  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_get_caller (struct btrace_thread_info *btinfo,
		   struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
      return ftrace_find_call_by_number (btinfo, bfun->up);

  return NULL;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  BTINFO is the branch trace information for the current
   thread.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun,
		    struct minimal_symbol *mfun,
		    struct symbol *fun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
	continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  BTINFO is the branch trace information for
   the current thread.  */

static struct btrace_function *
ftrace_find_call (struct btrace_thread_info *btinfo,
		  struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      btrace_insn &last = bfun->insn.back ();

      if (last.iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->next == 0);

      caller->next = bfun->number;
      bfun->prev = caller->number;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != 0)
	    prev = ftrace_find_call_by_number (btinfo, prev->up);

	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun->number;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}

/* Add a new function segment for a function switch at the end of the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun;

  /* This is an unexplained function switch.  We can't really be sure about the
     call stack, yet the best I can think of right now is to preserve it.  */
  bfun = ftrace_new_function (btinfo, mfun, fun);
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
  bfun->up = prev->up;
  bfun->flags = prev->flags;

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error at
   the end of the trace.
   BTINFO is the branch trace information for the current thread.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
		std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;

  if (btinfo->functions.empty ())
    bfun = ftrace_new_function (btinfo, NULL, NULL);
  else
    {
      /* We hijack the previous function segment if it was empty.  */
      bfun = &btinfo->functions.back ();
      if (bfun->errcode != 0 || !bfun->insn.empty ())
	bfun = ftrace_new_function (btinfo, NULL, NULL);
    }

  bfun->errcode = errcode;
  gaps.push_back (bfun->number);

  ftrace_debug (bfun, "new gap");

  return bfun;
}
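
/* Note (illustrative, not from the original file): a gap segment carries
   no symbol information and no instructions; it is identified by a
   non-zero ERRCODE.  ftrace_call_num_insn counts it as a single
   instruction so that instruction offsets stay consistent across the
   gap.  */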

/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  btrace_insn *last = NULL;
  if (!bfun->insn.empty ())
    last = &bfun->insn.back ();

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    return ftrace_new_return (btinfo, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (btinfo, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    /* Some versions of _Unwind_RaiseException use an indirect
	       jump to 'return' to the exception handler of the caller
	       handling the exception instead of a return.  Let's restrict
	       this heuristic to that and related functions.  */
	    const char *fname = ftrace_print_function_name (bfun);
	    if (strncmp (fname, "_Unwind_", strlen ("_Unwind_")) == 0)
	      {
		struct btrace_function *caller
		  = ftrace_find_call_by_number (btinfo, bfun->up);
		caller = ftrace_find_caller (btinfo, caller, mfun, fun);
		if (caller != NULL)
		  return ftrace_new_return (btinfo, mfun, fun);
	      }

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as a tail call if we're switching functions
	       and as an intra-function branch if we aren't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}

/* Add the instruction at PC to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun, const btrace_insn &insn)
{
  bfun->insn.push_back (insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  try
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  catch (const gdb_exception_error &error)
    {
    }

  return iclass;
}

/* Try to match the back trace at LHS to the back trace at RHS.  Returns the
   number of matching function segments or zero if the back traces do not
   match.  BTINFO is the branch trace information for the current thread.  */

static int
ftrace_match_backtrace (struct btrace_thread_info *btinfo,
			struct btrace_function *lhs,
			struct btrace_function *rhs)
{
  int matches;

  for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
    {
      if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
	return 0;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);
    }

  return matches;
}
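
/* Example (illustrative, not from the original file): for two segments
   whose back traces are both main -> f -> g, the walk above pairs up g,
   f, and main and returns 3.  If the back traces diverge at any level,
   say main -> f -> g vs. main -> h -> g, ftrace_function_switched
   detects the mismatch and the result is 0.  */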

/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
   BTINFO is the branch trace information for the current thread.  */

static void
ftrace_fixup_level (struct btrace_thread_info *btinfo,
		    struct btrace_function *bfun, int adjustment)
{
  if (adjustment == 0)
    return;

  DEBUG_FTRACE ("fixup level (%+d)", adjustment);
  ftrace_debug (bfun, "..bfun");

  while (bfun != NULL)
    {
      bfun->level += adjustment;
      bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
    }
}

/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  unsigned int length = btinfo->functions.size () - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back ();
  if (last->insn.size () != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
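
/* Example (illustrative, not from the original file): if the trace
   contains function segments at levels 1, 0, and -2, the minimal level
   is -2 and BTINFO->level becomes 2, so normalized function levels
   start at zero.  */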

/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (btinfo, prev, next, flags);
	}
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (btinfo, next, prev, flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags next_flags, prev_flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = ftrace_find_call_by_number (btinfo, next->up);
	  next_flags = next->flags;
	  prev_flags = prev->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  prev = ftrace_find_call_by_number (btinfo, prev->up);
	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);

	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
								  prev->up))
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == 0)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (btinfo, caller,
				      prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}

/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
			  struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}

/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
		   struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
	 cand_r = ftrace_get_caller (btinfo, cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}

/* Try to bridge gaps due to overflow or decode errors by connecting the
   function segments that are separated by the gap.  */

static void
btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  struct btrace_thread_info *btinfo = &tp->btrace;
  std::vector<unsigned int> remaining;
  int min_matches;

  DEBUG ("bridge gaps");

  /* We require a minimum number of matches for bridging a gap.  The number of
     required matches will be lowered with each iteration.

     The more matches the higher our confidence that the bridging is correct.
     For big gaps or small traces, however, it may not be feasible to require a
     high number of matches.  */
  for (min_matches = 5; min_matches > 0; --min_matches)
    {
      /* Let's try to bridge as many gaps as we can.  In some cases, we need to
	 skip a gap and revisit it again after we closed later gaps.  */
      while (!gaps.empty ())
	{
	  for (const unsigned int number : gaps)
	    {
	      struct btrace_function *gap, *lhs, *rhs;
	      int bridged;

	      gap = ftrace_find_call_by_number (btinfo, number);

	      /* We may have a sequence of gaps if we run from one error into
		 the next as we try to re-sync onto the trace stream.  Ignore
		 all but the leftmost gap in such a sequence.

		 Also ignore gaps at the beginning of the trace.  */
	      lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
	      if (lhs == NULL || lhs->errcode != 0)
		continue;

	      /* Skip gaps to the right.  */
	      rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
	      while (rhs != NULL && rhs->errcode != 0)
		rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);

	      /* Ignore gaps at the end of the trace.  */
	      if (rhs == NULL)
		continue;

	      bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);

	      /* Keep track of gaps we were not able to bridge and try again.
		 If we just pushed them to the end of GAPS we would risk an
		 infinite loop in case we simply cannot bridge a gap.  */
	      if (bridged == 0)
		remaining.push_back (number);
	    }

	  /* Let's see if we made any progress.  */
	  if (remaining.size () == gaps.size ())
	    break;

	  gaps.clear ();
	  gaps.swap (remaining);
	}

      /* We get here if either GAPS is empty or if GAPS equals REMAINING.  */
      if (gaps.empty ())
	break;

      remaining.clear ();
    }

  /* We may omit this in some cases.  Not sure it is worth the extra
     complication, though.  */
  ftrace_compute_global_level_offset (btinfo);
}
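
/* Note (illustrative, not from the original file): the loop above first
   bridges only gaps whose surrounding back traces share at least five
   frames, then retries the remaining gaps with four, three, and so on
   down to a single matching frame, so high-confidence connections are
   established before less certain ones.  */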

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace,
			   std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through gdb_insn_length.  Make sure TP is the
     current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  unsigned int blk;
  int level;

  gdbarch *gdbarch = current_inferior ()->arch ();
  btinfo = &tp->btrace;
  blk = btrace->blocks->size ();

  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  while (blk != 0)
    {
      CORE_ADDR pc;

      blk -= 1;

      const btrace_block &block = btrace->blocks->at (blk);
      pc = block.begin;

      for (;;)
	{
	  struct btrace_function *bfun;
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block.end < pc)
	    {
	      /* Indicate the gap in the trace.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);

	      warning (_("Recorded trace may be corrupted at instruction "
			 "%u (pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  bfun = ftrace_update_function (btinfo, pc);

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = std::min (level, bfun->level);

	  size = 0;
	  try
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  catch (const gdb_exception_error &error)
	    {
	    }

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (bfun, insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block.end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);

	      warning (_("Recorded trace may be incomplete at instruction %u "
			 "(pc = %s)."), bfun->insn_offset - 1,
		       core_addr_to_string_nz (pc));

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = std::min (level, bfun->level);
	}
    }

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
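
/* Note (illustrative, not from the original file): BTS stores blocks in
   reverse chronological order, so the loop above iterates from the last
   vector entry (the oldest block) to the first, decoding each block by
   stepping PC from block.begin to block.end one instruction at a
   time.  */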

#if defined (HAVE_LIBIPT)
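
/* Translate an instruction classification from libipt's enum to the
   corresponding btrace instruction class.  */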
static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static btrace_insn_flags
pt_btrace_insn_flags (const struct pt_insn &insn)
{
  btrace_insn_flags flags = 0;

  if (insn.speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Return the btrace instruction for INSN.  */

static btrace_insn
pt_btrace_insn (const struct pt_insn &insn)
{
  return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
	  pt_reclassify_insn (insn.iclass),
	  pt_btrace_insn_flags (insn)};
}

/* Handle instruction decode events (libipt-v2).  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
		       struct pt_insn_decoder *decoder,
		       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
	break;

      switch (event.type)
	{
	default:
	  break;

	case ptev_enabled:
	  if (event.status_update != 0)
	    break;

	  if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
	    {
	      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
			 PRIx64 ")."), bfun->insn_offset - 1, offset);
	    }

	  break;

	case ptev_overflow:
	  bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

	  pt_insn_get_offset (decoder, &offset);

	  warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
		   bfun->insn_offset - 1, offset);

	  break;
	}
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}

/* Handle events indicated by flags in INSN (libipt-v1).  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
			    struct pt_insn_decoder *decoder,
			    const struct pt_insn &insn,
			    std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
	       insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
		 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}

/* Add function branch trace to BTINFO using DECODER.  */

static void
ftrace_add_pt (struct btrace_thread_info *btinfo,
	       struct pt_insn_decoder *decoder,
	       int *plevel,
	       std::vector<unsigned int> &gaps)
{
  struct btrace_function *bfun;
  uint64_t offset;
  int status;

  for (;;)
    {
      struct pt_insn insn;

      status = pt_insn_sync_forward (decoder);
      if (status < 0)
	{
	  if (status != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (status)));
	  break;
	}

      for (;;)
	{
	  /* Handle events from the previous iteration or synchronization.  */
	  status = handle_pt_insn_events (btinfo, decoder, gaps, status);
	  if (status < 0)
	    break;

	  status = pt_insn_next (decoder, &insn, sizeof (insn));
	  if (status < 0)
	    break;

	  /* Handle events indicated by flags in INSN.  */
	  handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);

	  bfun = ftrace_update_function (btinfo, insn.ip);

	  /* Maintain the function level offset.  */
	  *plevel = std::min (*plevel, bfun->level);

	  ftrace_update_insns (bfun, pt_btrace_insn (insn));
	}

      if (status == -pte_eos)
	break;

      /* Indicate the gap in the trace.  */
      bfun = ftrace_new_gap (btinfo, status, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
	       offset, insn.ip, pt_errstr (pt_errcode (status)));
    }
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  try
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  catch (const gdb_exception_error &error)
    {
      result = -pte_nomap;
    }

  return result;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel Processor Trace
   format.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  /* We may end up doing target calls that require the current thread to be TP,
     for example reading memory through btrace_pt_readmem_callback.  Make sure
     TP is the current thread.  */
  scoped_restore_current_thread restore_thread;
  switch_to_thread (tp);

  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  if (btinfo->functions.empty ())
    level = INT_MAX;
  else
    level = -btinfo->level;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  /* We treat an unknown vendor as 'no errata'.  */
  if (btrace->config.cpu.vendor != CV_UNKNOWN)
    {
      config.cpu.vendor
	= pt_translate_cpu_vendor (btrace->config.cpu.vendor);
      config.cpu.family = btrace->config.cpu.family;
      config.cpu.model = btrace->config.cpu.model;
      config.cpu.stepping = btrace->config.cpu.stepping;

      errcode = pt_cpu_errata (&config.errata, &config.cpu);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace "
		 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
    }

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel Processor Trace decoder."));

  try
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
				       NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (btinfo, decoder, &level, gaps);
    }
  catch (const gdb_exception &error)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
	ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw;
    }

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace,
			  std::vector<unsigned int> &gaps)
{
  internal_error (_("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a block branch trace BTRACE for
   a thread given by BTINFO.  If CPU is not NULL, overwrite the cpu in the
   branch trace configuration.  This is currently only used for the PT
   format.  */

static void
btrace_compute_ftrace_1 (struct thread_info *tp,
			 struct btrace_data *btrace,
			 const struct btrace_cpu *cpu,
			 std::vector<unsigned int> &gaps)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
      return;

    case BTRACE_FORMAT_PT:
      /* Overwrite the cpu we use for enabling errata workarounds.  */
      if (cpu != nullptr)
	btrace->variant.pt.config.cpu = *cpu;

      btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
      return;
    }

  internal_error (_("Unknown branch trace format."));
}
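
/* Update the thread's gap count and try to bridge the recorded gaps.
   Called by btrace_compute_ftrace on both its normal and its error
   path.  */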
static void
btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
{
  if (!gaps.empty ())
    {
      tp->btrace.ngaps += gaps.size ();
      btrace_bridge_gaps (tp, gaps);
    }
}
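
/* Compute the function branch trace from the block or PT trace in BTRACE,
   bridging any gaps even if trace processing is interrupted by an
   exception.  */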
static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace,
		       const struct btrace_cpu *cpu)
{
  std::vector<unsigned int> gaps;

  try
    {
      btrace_compute_ftrace_1 (tp, btrace, cpu, gaps);
    }
  catch (const gdb_exception &error)
    {
      btrace_finalize_ftrace (tp, gaps);

      throw;
    }

  btrace_finalize_ftrace (tp, gaps);
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct regcache *regcache;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp);
  pc = regcache_read_pc (regcache);

  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = new std::vector<btrace_block>;

  btrace.variant.bts.blocks->emplace_back (pc, pc);

  btrace_compute_ftrace (tp, &btrace, NULL);
}
1601
02d27625
MM
1602/* See btrace.h. */
1603
1604void
f4abbc16 1605btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
02d27625
MM
1606{
1607 if (tp->btrace.target != NULL)
5897fd49
MM
1608 error (_("Recording already enabled on thread %s (%s)."),
1609 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
02d27625 1610
46a3515b
MM
1611#if !defined (HAVE_LIBIPT)
1612 if (conf->format == BTRACE_FORMAT_PT)
c4e12631 1613 error (_("Intel Processor Trace support was disabled at compile time."));
46a3515b
MM
1614#endif /* !defined (HAVE_LIBIPT) */
1615
43792cf0 1616 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
e53c95d4 1617 tp->ptid.to_string ().c_str ());
02d27625 1618
696c0d5e 1619 tp->btrace.target = target_enable_btrace (tp, conf);
6e07b1d2 1620
cd4007e4 1621 if (tp->btrace.target == NULL)
5897fd49
MM
1622 error (_("Failed to enable recording on thread %s (%s)."),
1623 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
cd4007e4
MM
1624
1625 /* We need to undo the enable in case of errors. */
a70b8144 1626 try
cd4007e4
MM
1627 {
1628 /* Add an entry for the current PC so we start tracing from where we
1629 enabled it.
1630
1631 If we can't access TP's registers, TP is most likely running. In this
1632 case, we can't really say where tracing was enabled so it should be
1633 safe to simply skip this step.
1634
1635 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1636 start at the PC at which tracing was enabled. */
1637 if (conf->format != BTRACE_FORMAT_PT
00431a78 1638 && can_access_registers_thread (tp))
cd4007e4
MM
1639 btrace_add_pc (tp);
1640 }
230d2906 1641 catch (const gdb_exception &exception)
cd4007e4
MM
1642 {
1643 btrace_disable (tp);
1644
eedc3f4f 1645 throw;
cd4007e4 1646 }
02d27625
MM
1647}
1648
1649/* See btrace.h. */
1650
f4abbc16
MM
1651const struct btrace_config *
1652btrace_conf (const struct btrace_thread_info *btinfo)
1653{
1654 if (btinfo->target == NULL)
1655 return NULL;
1656
1657 return target_btrace_conf (btinfo->target);
1658}
1659
1660/* See btrace.h. */
1661
02d27625
MM
1662void
1663btrace_disable (struct thread_info *tp)
1664{
1665 struct btrace_thread_info *btp = &tp->btrace;
02d27625
MM
1666
1667 if (btp->target == NULL)
5897fd49
MM
1668 error (_("Recording not enabled on thread %s (%s)."),
1669 print_thread_id (tp), target_pid_to_str (tp->ptid).c_str ());
02d27625 1670
43792cf0 1671 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
e53c95d4 1672 tp->ptid.to_string ().c_str ());
02d27625
MM
1673
1674 target_disable_btrace (btp->target);
1675 btp->target = NULL;
1676
1677 btrace_clear (tp);
1678}
1679
1680/* See btrace.h. */
1681
1682void
1683btrace_teardown (struct thread_info *tp)
1684{
1685 struct btrace_thread_info *btp = &tp->btrace;
02d27625
MM
1686
1687 if (btp->target == NULL)
1688 return;
1689
43792cf0 1690 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
e53c95d4 1691 tp->ptid.to_string ().c_str ());
02d27625
MM
1692
1693 target_teardown_btrace (btp->target);
1694 btp->target = NULL;
1695
1696 btrace_clear (tp);
1697}
1698
734b0e4b 1699/* Stitch branch trace in BTS format. */
969c39fb
MM
1700
1701static int
31fd9caa 1702btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
969c39fb 1703{
31fd9caa 1704 struct btrace_thread_info *btinfo;
969c39fb 1705 struct btrace_function *last_bfun;
46f29a9a 1706 btrace_block *first_new_block;
969c39fb 1707
31fd9caa 1708 btinfo = &tp->btrace;
b54b03bd 1709 gdb_assert (!btinfo->functions.empty ());
46f29a9a 1710 gdb_assert (!btrace->blocks->empty ());
31fd9caa 1711
08c3f6d2 1712 last_bfun = &btinfo->functions.back ();
b54b03bd 1713
31fd9caa
MM
1714 /* If the existing trace ends with a gap, we just glue the traces
1715 together. We need to drop the last (i.e. chronologically first) block
1716 of the new trace, though, since we can't fill in the start address. */
0860c437 1717 if (last_bfun->insn.empty ())
31fd9caa 1718 {
46f29a9a 1719 btrace->blocks->pop_back ();
31fd9caa
MM
1720 return 0;
1721 }
969c39fb
MM
1722
1723 /* Beware that block trace starts with the most recent block, so the
1724 chronologically first block in the new trace is the last block in
1725 the new trace's block vector. */
46f29a9a 1726 first_new_block = &btrace->blocks->back ();
0860c437 1727 const btrace_insn &last_insn = last_bfun->insn.back ();
969c39fb
MM
1728
1729 /* If the current PC at the end of the block is the same as in our current
1730 trace, there are two explanations:
1731 1. we executed the instruction and some branch brought us back.
1732 2. we have not made any progress.
1733 In the first case, the delta trace vector should contain at least two
1734 entries.
1735 In the second case, the delta trace vector should contain exactly one
1736 entry for the partial block containing the current PC. Remove it. */
46f29a9a 1737 if (first_new_block->end == last_insn.pc && btrace->blocks->size () == 1)
969c39fb 1738 {
46f29a9a 1739 btrace->blocks->pop_back ();
969c39fb
MM
1740 return 0;
1741 }
1742
0860c437 1743 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (&last_insn),
969c39fb
MM
1744 core_addr_to_string_nz (first_new_block->end));
1745
1746 /* Do a simple sanity check to make sure we don't accidentally end up
1747 with a bad block. This should not occur in practice. */
0860c437 1748 if (first_new_block->end < last_insn.pc)
969c39fb
MM
1749 {
1750 warning (_("Error while trying to read delta trace. Falling back to "
1751 "a full read."));
1752 return -1;
1753 }
1754
1755 /* We adjust the last block to start at the end of our current trace. */
1756 gdb_assert (first_new_block->begin == 0);
0860c437 1757 first_new_block->begin = last_insn.pc;
969c39fb
MM
1758
1759 /* We simply pop the last insn so we can insert it again as part of
1760 the normal branch trace computation.
1761 Since instruction iterators are based on indices in the instructions
1762 vector, we don't leave any pointers dangling. */
1763 DEBUG ("pruning insn at %s for stitching",
0860c437 1764 ftrace_print_insn_addr (&last_insn));
969c39fb 1765
0860c437 1766 last_bfun->insn.pop_back ();
969c39fb
MM
1767
1768 /* The instructions vector may become empty temporarily if this has
1769 been the only instruction in this function segment.
1770 This violates the invariant but will be remedied shortly by
1771 btrace_compute_ftrace when we add the new trace. */
31fd9caa
MM
1772
1773 /* The only case where this would hurt is if the entire trace consisted
1774 of just that one instruction. If we remove it, we might turn the now
1775 empty btrace function segment into a gap. But we don't want gaps at
1776 the beginning. To avoid this, we remove the entire old trace. */
0860c437 1777 if (last_bfun->number == 1 && last_bfun->insn.empty ())
31fd9caa
MM
1778 btrace_clear (tp);
1779
969c39fb
MM
1780 return 0;
1781}
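
/* A minimal standalone sketch of the stitching decision above, using
   simplified stand-in types instead of the real GDB structures.  Blocks
   are ordered most-recent-first, as in the real delta trace, so the
   chronologically first new block is the vector's last element.  */

#include <cstdint>
#include <vector>

struct sketch_block { uint64_t begin, end; };

/* LAST_PC is the address of the last instruction in the existing trace;
   OLD_ENDS_IN_GAP says whether the existing trace ends with an empty
   (gap) function segment.  Returns 0 on success, -1 to request a full
   re-read, mirroring btrace_stitch_bts.  */

static int
sketch_stitch_bts (std::vector<sketch_block> &new_blocks, uint64_t last_pc,
		   bool old_ends_in_gap)
{
  /* The old trace ends in a gap: drop the chronologically first new
     block, whose start address cannot be reconstructed.  */
  if (old_ends_in_gap)
    {
      new_blocks.pop_back ();
      return 0;
    }

  sketch_block &first_new = new_blocks.back ();

  /* A single block ending at LAST_PC means no progress was made.  */
  if (first_new.end == last_pc && new_blocks.size () == 1)
    {
      new_blocks.pop_back ();
      return 0;
    }

  /* A block ending before LAST_PC indicates a bad delta read.  */
  if (first_new.end < last_pc)
    return -1;

  /* Otherwise re-anchor the first new block at the last known PC.  */
  first_new.begin = last_pc;
  return 0;
}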
1782
734b0e4b
MM
1783/* Adjust the block trace in order to stitch old and new trace together.
1784 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
1785 TP is the traced thread.
1786 May modify BTRACE as well as the existing trace in TP.
734b0e4b
MM
1787 Return 0 on success, -1 otherwise. */
1788
1789static int
31fd9caa 1790btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1791{
1792 /* If we don't have trace, there's nothing to do. */
8dcc53b3 1793 if (btrace->empty ())
734b0e4b
MM
1794 return 0;
1795
1796 switch (btrace->format)
1797 {
1798 case BTRACE_FORMAT_NONE:
1799 return 0;
1800
1801 case BTRACE_FORMAT_BTS:
31fd9caa 1802 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1803
1804 case BTRACE_FORMAT_PT:
1805 /* Delta reads are not supported. */
1806 return -1;
734b0e4b
MM
1807 }
1808
f34652de 1809 internal_error (_("Unknown branch trace format."));
734b0e4b
MM
1810}
1811
969c39fb
MM
1812/* Clear the branch trace histories in BTINFO. */
1813
1814static void
1815btrace_clear_history (struct btrace_thread_info *btinfo)
1816{
1817 xfree (btinfo->insn_history);
1818 xfree (btinfo->call_history);
1819 xfree (btinfo->replay);
1820
1821 btinfo->insn_history = NULL;
1822 btinfo->call_history = NULL;
1823 btinfo->replay = NULL;
1824}
1825
b0627500
MM
1826/* Clear the branch trace maintenance histories in BTINFO. */
1827
1828static void
1829btrace_maint_clear (struct btrace_thread_info *btinfo)
1830{
1831 switch (btinfo->data.format)
1832 {
1833 default:
1834 break;
1835
1836 case BTRACE_FORMAT_BTS:
1837 btinfo->maint.variant.bts.packet_history.begin = 0;
1838 btinfo->maint.variant.bts.packet_history.end = 0;
1839 break;
1840
1841#if defined (HAVE_LIBIPT)
1842 case BTRACE_FORMAT_PT:
554ac434 1843 delete btinfo->maint.variant.pt.packets;
b0627500
MM
1844
1845 btinfo->maint.variant.pt.packets = NULL;
1846 btinfo->maint.variant.pt.packet_history.begin = 0;
1847 btinfo->maint.variant.pt.packet_history.end = 0;
1848 break;
1849#endif /* defined (HAVE_LIBIPT) */
1850 }
1851}
1852
02d27625
MM
1853/* See btrace.h. */
1854
508352a9
TW
1855const char *
1856btrace_decode_error (enum btrace_format format, int errcode)
1857{
1858 switch (format)
1859 {
1860 case BTRACE_FORMAT_BTS:
1861 switch (errcode)
1862 {
1863 case BDE_BTS_OVERFLOW:
1864 return _("instruction overflow");
1865
1866 case BDE_BTS_INSN_SIZE:
1867 return _("unknown instruction");
1868
1869 default:
1870 break;
1871 }
1872 break;
1873
1874#if defined (HAVE_LIBIPT)
1875 case BTRACE_FORMAT_PT:
1876 switch (errcode)
1877 {
1878 case BDE_PT_USER_QUIT:
1879 return _("trace decode cancelled");
1880
1881 case BDE_PT_DISABLED:
1882 return _("disabled");
1883
1884 case BDE_PT_OVERFLOW:
1885 return _("overflow");
1886
1887 default:
1888 if (errcode < 0)
1889 return pt_errstr (pt_errcode (errcode));
1890 break;
1891 }
1892 break;
1893#endif /* defined (HAVE_LIBIPT) */
1894
1895 default:
1896 break;
1897 }
1898
1899 return _("unknown");
1900}
1901
1902/* See btrace.h. */
1903
02d27625 1904void
4a4495d6 1905btrace_fetch (struct thread_info *tp, const struct btrace_cpu *cpu)
02d27625
MM
1906{
1907 struct btrace_thread_info *btinfo;
969c39fb 1908 struct btrace_target_info *tinfo;
734b0e4b 1909 struct btrace_data btrace;
969c39fb 1910 int errcode;
02d27625 1911
43792cf0 1912 DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
e53c95d4 1913 tp->ptid.to_string ().c_str ());
02d27625
MM
1914
1915 btinfo = &tp->btrace;
969c39fb
MM
1916 tinfo = btinfo->target;
1917 if (tinfo == NULL)
1918 return;
1919
1920 /* There's no way we could get new trace while replaying.
1921 On the other hand, delta trace would return a partial record with the
1922 current PC, which is the replay PC, not the last PC, as expected. */
1923 if (btinfo->replay != NULL)
02d27625
MM
1924 return;
1925
86e57d1b
PA
1926 /* With CLI usage, TP is always the current thread when we get here.
1927 However, since we can also store a gdb.Record object in Python
1928 referring to a different thread than the current one, we need to
1929 temporarily set the current thread. */
1930 scoped_restore_current_thread restore_thread;
1931 switch_to_thread (tp);
ae20e79a 1932
cd4007e4 1933 /* We should not be called on running or exited threads. */
00431a78 1934 gdb_assert (can_access_registers_thread (tp));
cd4007e4 1935
969c39fb 1936 /* Let's first try to extend the trace we already have. */
b54b03bd 1937 if (!btinfo->functions.empty ())
969c39fb
MM
1938 {
1939 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
1940 if (errcode == 0)
1941 {
1942 /* Success. Let's try to stitch the traces together. */
31fd9caa 1943 errcode = btrace_stitch_trace (&btrace, tp);
969c39fb
MM
1944 }
1945 else
1946 {
1947 /* We failed to read delta trace. Let's try to read new trace. */
1948 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);
1949
1950 /* If we got any new trace, discard what we have. */
8dcc53b3 1951 if (errcode == 0 && !btrace.empty ())
969c39fb
MM
1952 btrace_clear (tp);
1953 }
1954
1955 /* If we were not able to read the trace, we start over. */
1956 if (errcode != 0)
1957 {
1958 btrace_clear (tp);
1959 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1960 }
1961 }
1962 else
1963 errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
1964
1965 /* If we were not able to read the branch trace, signal an error. */
1966 if (errcode != 0)
1967 error (_("Failed to read branch trace."));
1968
1969 /* Compute the trace, provided we have any. */
8dcc53b3 1970 if (!btrace.empty ())
23a7fe75 1971 {
9be54cae
MM
1972 /* Store the raw trace data. The stored data will be cleared in
1973 btrace_clear, so we always append the new trace. */
1974 btrace_data_append (&btinfo->data, &btrace);
b0627500 1975 btrace_maint_clear (btinfo);
9be54cae 1976
969c39fb 1977 btrace_clear_history (btinfo);
4a4495d6 1978 btrace_compute_ftrace (tp, &btrace, cpu);
23a7fe75 1979 }
02d27625
MM
1980}
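
/* A condensed sketch of the read strategy above, with std::function
   stand-ins for target_read_btrace, btrace_stitch_trace, and
   btrace_clear.  HAVE_TRACE corresponds to a non-empty function vector,
   REPLAYING to an active replay iterator.  Slightly simplified: the real
   code discards the old trace only if the new read actually returned
   trace.  */

#include <functional>

enum class sketch_read_type { delta, anew, all };

static int
sketch_fetch (bool have_trace, bool replaying,
	      const std::function<int (sketch_read_type)> &read,
	      const std::function<int ()> &stitch,
	      const std::function<void ()> &clear)
{
  /* No new trace can arrive while replaying.  */
  if (replaying)
    return 0;

  if (!have_trace)
    return read (sketch_read_type::all);

  /* Prefer the cheap delta read and stitch it onto the existing trace;
     stitching may fail, e.g. for formats without delta support.  */
  int errcode = read (sketch_read_type::delta);
  if (errcode == 0)
    errcode = stitch ();
  else
    {
      /* The delta read failed; try reading new trace instead,
	 discarding what we have if that succeeds.  */
      errcode = read (sketch_read_type::anew);
      if (errcode == 0)
	clear ();
    }

  /* As a last resort, start over with a full read.  */
  if (errcode != 0)
    {
      clear ();
      errcode = read (sketch_read_type::all);
    }

  return errcode;
}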
1981
1982/* See btrace.h. */
1983
1984void
1985btrace_clear (struct thread_info *tp)
1986{
1987 struct btrace_thread_info *btinfo;
1988
43792cf0 1989 DEBUG ("clear thread %s (%s)", print_thread_id (tp),
e53c95d4 1990 tp->ptid.to_string ().c_str ());
02d27625 1991
0b722aec
MM
1992 /* Make sure btrace frames that may hold a pointer into the branch
1993 trace data are destroyed. */
1994 reinit_frame_cache ();
1995
02d27625 1996 btinfo = &tp->btrace;
23a7fe75 1997
17b89b34 1998 btinfo->functions.clear ();
31fd9caa 1999 btinfo->ngaps = 0;
23a7fe75 2000
b0627500
MM
2001 /* Must clear the maint data first - it depends on BTINFO->DATA. */
2002 btrace_maint_clear (btinfo);
8dcc53b3 2003 btinfo->data.clear ();
969c39fb 2004 btrace_clear_history (btinfo);
02d27625
MM
2005}
2006
2007/* See btrace.h. */
2008
2009void
2010btrace_free_objfile (struct objfile *objfile)
2011{
02d27625
MM
2012 DEBUG ("free objfile");
2013
08036331 2014 for (thread_info *tp : all_non_exited_threads ())
02d27625
MM
2015 btrace_clear (tp);
2016}
c12a2917 2017
23a7fe75
MM
2018/* See btrace.h. */
2019
2020const struct btrace_insn *
2021btrace_insn_get (const struct btrace_insn_iterator *it)
2022{
2023 const struct btrace_function *bfun;
2024 unsigned int index, end;
2025
a0f1b963 2026 index = it->insn_index;
08c3f6d2 2027 bfun = &it->btinfo->functions[it->call_index];
23a7fe75 2028
31fd9caa
MM
2029 /* Check if the iterator points to a gap in the trace. */
2030 if (bfun->errcode != 0)
2031 return NULL;
2032
23a7fe75 2033 /* The index is within the bounds of this function's instruction vector. */
0860c437 2034 end = bfun->insn.size ();
23a7fe75
MM
2035 gdb_assert (0 < end);
2036 gdb_assert (index < end);
2037
0860c437 2038 return &bfun->insn[index];
23a7fe75
MM
2039}
2040
2041/* See btrace.h. */
2042
69090cee
TW
2043int
2044btrace_insn_get_error (const struct btrace_insn_iterator *it)
23a7fe75 2045{
08c3f6d2 2046 return it->btinfo->functions[it->call_index].errcode;
69090cee 2047}
31fd9caa 2048
69090cee 2049/* See btrace.h. */
31fd9caa 2050
69090cee
TW
2051unsigned int
2052btrace_insn_number (const struct btrace_insn_iterator *it)
2053{
08c3f6d2 2054 return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
23a7fe75
MM
2055}
2056
2057/* See btrace.h. */
2058
2059void
2060btrace_insn_begin (struct btrace_insn_iterator *it,
2061 const struct btrace_thread_info *btinfo)
2062{
b54b03bd 2063 if (btinfo->functions.empty ())
23a7fe75
MM
2064 error (_("No trace."));
2065
521103fd 2066 it->btinfo = btinfo;
a0f1b963
TW
2067 it->call_index = 0;
2068 it->insn_index = 0;
23a7fe75
MM
2069}
2070
2071/* See btrace.h. */
2072
2073void
2074btrace_insn_end (struct btrace_insn_iterator *it,
2075 const struct btrace_thread_info *btinfo)
2076{
2077 const struct btrace_function *bfun;
2078 unsigned int length;
2079
b54b03bd 2080 if (btinfo->functions.empty ())
23a7fe75
MM
2081 error (_("No trace."));
2082
08c3f6d2 2083 bfun = &btinfo->functions.back ();
0860c437 2084 length = bfun->insn.size ();
23a7fe75 2085
31fd9caa
MM
2086 /* The last function may either be a gap or it contains the current
2087 instruction, which is one past the end of the execution trace; ignore
2088 it. */
2089 if (length > 0)
2090 length -= 1;
2091
521103fd 2092 it->btinfo = btinfo;
a0f1b963
TW
2093 it->call_index = bfun->number - 1;
2094 it->insn_index = length;
23a7fe75
MM
2095}
2096
2097/* See btrace.h. */
2098
2099unsigned int
2100btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
2101{
2102 const struct btrace_function *bfun;
2103 unsigned int index, steps;
2104
08c3f6d2 2105 bfun = &it->btinfo->functions[it->call_index];
23a7fe75 2106 steps = 0;
a0f1b963 2107 index = it->insn_index;
23a7fe75
MM
2108
2109 while (stride != 0)
2110 {
2111 unsigned int end, space, adv;
2112
0860c437 2113 end = bfun->insn.size ();
23a7fe75 2114
31fd9caa
MM
2115 /* An empty function segment represents a gap in the trace. We count
2116 it as one instruction. */
2117 if (end == 0)
2118 {
2119 const struct btrace_function *next;
2120
eb8f2b9c 2121 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
31fd9caa
MM
2122 if (next == NULL)
2123 break;
2124
2125 stride -= 1;
2126 steps += 1;
2127
2128 bfun = next;
2129 index = 0;
2130
2131 continue;
2132 }
2133
23a7fe75
MM
2134 gdb_assert (0 < end);
2135 gdb_assert (index < end);
2136
2137 /* Compute the number of instructions remaining in this segment. */
2138 space = end - index;
2139
2140 /* Advance the iterator as far as possible within this segment. */
325fac50 2141 adv = std::min (space, stride);
23a7fe75
MM
2142 stride -= adv;
2143 index += adv;
2144 steps += adv;
2145
2146 /* Move to the next function if we're at the end of this one. */
2147 if (index == end)
2148 {
2149 const struct btrace_function *next;
2150
eb8f2b9c 2151 next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
23a7fe75
MM
2152 if (next == NULL)
2153 {
2154 /* We stepped past the last function.
2155
2156 Let's adjust the index to point to the last instruction in
2157 the previous function. */
2158 index -= 1;
2159 steps -= 1;
2160 break;
2161 }
2162
2163 /* We now point to the first instruction in the new function. */
2164 bfun = next;
2165 index = 0;
2166 }
2167
2168 /* We did make progress. */
2169 gdb_assert (adv > 0);
2170 }
2171
2172 /* Update the iterator. */
a0f1b963
TW
2173 it->call_index = bfun->number - 1;
2174 it->insn_index = index;
23a7fe75
MM
2175
2176 return steps;
2177}
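
/* A toy model of the forward traversal above.  Segments are given only
   by their instruction counts; a count of zero stands for a gap, which
   is charged as a single step.  SEG/INDEX form the iterator position.
   Returns the number of steps actually taken.  */

#include <algorithm>
#include <vector>

static unsigned int
sketch_insn_next (const std::vector<unsigned int> &segments,
		  unsigned int &seg, unsigned int &index,
		  unsigned int stride)
{
  unsigned int steps = 0;

  while (stride != 0)
    {
      unsigned int end = segments[seg];

      /* A gap counts as one instruction.  */
      if (end == 0)
	{
	  if (seg + 1 == segments.size ())
	    break;

	  seg += 1;
	  index = 0;
	  stride -= 1;
	  steps += 1;
	  continue;
	}

      /* Advance as far as possible within this segment.  */
      unsigned int adv = std::min (end - index, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      if (index == end)
	{
	  if (seg + 1 == segments.size ())
	    {
	      /* We stepped past the last segment; point back at its
		 last instruction.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  seg += 1;
	  index = 0;
	}
    }

  return steps;
}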
2178
2179/* See btrace.h. */
2180
2181unsigned int
2182btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
2183{
2184 const struct btrace_function *bfun;
2185 unsigned int index, steps;
2186
08c3f6d2 2187 bfun = &it->btinfo->functions[it->call_index];
23a7fe75 2188 steps = 0;
a0f1b963 2189 index = it->insn_index;
23a7fe75
MM
2190
2191 while (stride != 0)
2192 {
2193 unsigned int adv;
2194
2195 /* Move to the previous function if we're at the start of this one. */
2196 if (index == 0)
2197 {
2198 const struct btrace_function *prev;
2199
eb8f2b9c 2200 prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
23a7fe75
MM
2201 if (prev == NULL)
2202 break;
2203
2204 /* We point to one after the last instruction in the new function. */
2205 bfun = prev;
0860c437 2206 index = bfun->insn.size ();
23a7fe75 2207
31fd9caa
MM
2208 /* An empty function segment represents a gap in the trace. We count
2209 it as one instruction. */
2210 if (index == 0)
2211 {
2212 stride -= 1;
2213 steps += 1;
2214
2215 continue;
2216 }
23a7fe75
MM
2217 }
2218
2219 /* Advance the iterator as far as possible within this segment. */
325fac50 2220 adv = std::min (index, stride);
31fd9caa 2221
23a7fe75
MM
2222 stride -= adv;
2223 index -= adv;
2224 steps += adv;
2225
2226 /* We did make progress. */
2227 gdb_assert (adv > 0);
2228 }
2229
2230 /* Update the iterator. */
a0f1b963
TW
2231 it->call_index = bfun->number - 1;
2232 it->insn_index = index;
23a7fe75
MM
2233
2234 return steps;
2235}
2236
2237/* See btrace.h. */
2238
2239int
2240btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2241 const struct btrace_insn_iterator *rhs)
2242{
a0f1b963 2243 gdb_assert (lhs->btinfo == rhs->btinfo);
23a7fe75 2244
a0f1b963
TW
2245 if (lhs->call_index != rhs->call_index)
2246 return lhs->call_index - rhs->call_index;
23a7fe75 2247
a0f1b963 2248 return lhs->insn_index - rhs->insn_index;
23a7fe75
MM
2249}
2250
2251/* See btrace.h. */
2252
2253int
2254btrace_find_insn_by_number (struct btrace_insn_iterator *it,
2255 const struct btrace_thread_info *btinfo,
2256 unsigned int number)
2257{
2258 const struct btrace_function *bfun;
fdd2bd92 2259 unsigned int upper, lower;
23a7fe75 2260
2b51eddc 2261 if (btinfo->functions.empty ())
fdd2bd92 2262 return 0;
23a7fe75 2263
fdd2bd92 2264 lower = 0;
08c3f6d2 2265 bfun = &btinfo->functions[lower];
fdd2bd92 2266 if (number < bfun->insn_offset)
23a7fe75
MM
2267 return 0;
2268
2b51eddc 2269 upper = btinfo->functions.size () - 1;
08c3f6d2 2270 bfun = &btinfo->functions[upper];
fdd2bd92 2271 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
23a7fe75
MM
2272 return 0;
2273
fdd2bd92
TW
2274 /* We assume that there are no holes in the numbering. */
2275 for (;;)
2276 {
2277 const unsigned int average = lower + (upper - lower) / 2;
2278
08c3f6d2 2279 bfun = &btinfo->functions[average];
fdd2bd92
TW
2280
2281 if (number < bfun->insn_offset)
2282 {
2283 upper = average - 1;
2284 continue;
2285 }
2286
2287 if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
2288 {
2289 lower = average + 1;
2290 continue;
2291 }
2292
2293 break;
2294 }
2295
521103fd 2296 it->btinfo = btinfo;
a0f1b963
TW
2297 it->call_index = bfun->number - 1;
2298 it->insn_index = number - bfun->insn_offset;
23a7fe75
MM
2299 return 1;
2300}
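
/* A self-contained sketch of the binary search above.  Segments are
   modeled as (insn_offset, length) pairs with contiguous numbering and
   length >= 1 (a gap counts as one instruction), matching the "no
   holes" assumption in the code.  Returns the index of the segment
   containing NUMBER, or -1 if NUMBER is out of range.  */

#include <vector>

struct sketch_segment { unsigned int insn_offset, length; };

static int
sketch_find_insn (const std::vector<sketch_segment> &segs,
		  unsigned int number)
{
  if (segs.empty ()
      || number < segs.front ().insn_offset
      || number >= segs.back ().insn_offset + segs.back ().length)
    return -1;

  unsigned int lower = 0, upper = segs.size () - 1;
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;
      const sketch_segment &s = segs[average];

      if (number < s.insn_offset)
	upper = average - 1;
      else if (number >= s.insn_offset + s.length)
	lower = average + 1;
      else
	return (int) average;
    }
}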
2301
f158f208
TW
2302/* Returns true if the recording ends with a function segment that
2303 contains only a single (i.e. the current) instruction. */
2304
2305static bool
2306btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2307{
2308 const btrace_function *bfun;
2309
2310 if (btinfo->functions.empty ())
2311 return false;
2312
08c3f6d2 2313 bfun = &btinfo->functions.back ();
f158f208
TW
2314 if (bfun->errcode != 0)
2315 return false;
2316
2317 return ftrace_call_num_insn (bfun) == 1;
2318}
2319
23a7fe75
MM
2320/* See btrace.h. */
2321
2322const struct btrace_function *
2323btrace_call_get (const struct btrace_call_iterator *it)
2324{
f158f208
TW
2325 if (it->index >= it->btinfo->functions.size ())
2326 return NULL;
2327
08c3f6d2 2328 return &it->btinfo->functions[it->index];
23a7fe75
MM
2329}
2330
2331/* See btrace.h. */
2332
2333unsigned int
2334btrace_call_number (const struct btrace_call_iterator *it)
2335{
f158f208 2336 const unsigned int length = it->btinfo->functions.size ();
23a7fe75 2337
f158f208
TW
2338 /* If the last function segment contains only a single instruction (i.e. the
2339 current instruction), skip it. */
2340 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2341 return length;
23a7fe75 2342
f158f208 2343 return it->index + 1;
23a7fe75
MM
2344}
2345
2346/* See btrace.h. */
2347
2348void
2349btrace_call_begin (struct btrace_call_iterator *it,
2350 const struct btrace_thread_info *btinfo)
2351{
f158f208 2352 if (btinfo->functions.empty ())
23a7fe75
MM
2353 error (_("No trace."));
2354
2355 it->btinfo = btinfo;
f158f208 2356 it->index = 0;
23a7fe75
MM
2357}
2358
2359/* See btrace.h. */
2360
2361void
2362btrace_call_end (struct btrace_call_iterator *it,
2363 const struct btrace_thread_info *btinfo)
2364{
f158f208 2365 if (btinfo->functions.empty ())
23a7fe75
MM
2366 error (_("No trace."));
2367
2368 it->btinfo = btinfo;
f158f208 2369 it->index = btinfo->functions.size ();
23a7fe75
MM
2370}
2371
2372/* See btrace.h. */
2373
2374unsigned int
2375btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2376{
f158f208 2377 const unsigned int length = it->btinfo->functions.size ();
23a7fe75 2378
f158f208
TW
2379 if (it->index + stride < length - 1)
2380 /* Default case: Simply advance the iterator. */
2381 it->index += stride;
2382 else if (it->index + stride == length - 1)
23a7fe75 2383 {
f158f208
TW
2384 /* We land exactly at the last function segment. If it contains only one
2385 instruction (i.e. the current instruction) it is not actually part of
2386 the trace. */
2387 if (btrace_ends_with_single_insn (it->btinfo))
2388 it->index = length;
2389 else
2390 it->index = length - 1;
2391 }
2392 else
2393 {
2394 /* We land past the last function segment and have to adjust the stride.
2395 If the last function segment contains only one instruction (i.e. the
2396 current instruction) it is not actually part of the trace. */
2397 if (btrace_ends_with_single_insn (it->btinfo))
2398 stride = length - it->index - 1;
2399 else
2400 stride = length - it->index;
23a7fe75 2401
f158f208 2402 it->index = length;
23a7fe75
MM
2403 }
2404
f158f208 2405 return stride;
23a7fe75
MM
2406}
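
/* A toy model of the advance logic above.  LENGTH is the (non-zero)
   number of function segments; ENDS_WITH_SINGLE_INSN says whether the
   last segment holds only the current instruction and thus is not part
   of the call history.  INDEX is the iterator position, with LENGTH
   meaning one-past-the-end.  Returns the adjusted stride.  */

static unsigned int
sketch_call_next (unsigned int &index, unsigned int stride,
		  unsigned int length, bool ends_with_single_insn)
{
  if (index + stride < length - 1)
    /* Default case: simply advance.  */
    index += stride;
  else if (index + stride == length - 1)
    /* We land exactly on the last segment; skip it if it only holds
       the current instruction.  */
    index = ends_with_single_insn ? length : length - 1;
  else
    {
      /* We land past the end; clip the stride accordingly.  */
      stride = length - index - (ends_with_single_insn ? 1 : 0);
      index = length;
    }

  return stride;
}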
2407
2408/* See btrace.h. */
2409
2410unsigned int
2411btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2412{
f158f208
TW
2413 const unsigned int length = it->btinfo->functions.size ();
2414 int steps = 0;
23a7fe75 2415
f158f208 2416 gdb_assert (it->index <= length);
23a7fe75 2417
f158f208
TW
2418 if (stride == 0 || it->index == 0)
2419 return 0;
23a7fe75 2420
f158f208
TW
2421 /* If we are at the end, the first step is a special case. If the last
2422 function segment contains only one instruction (i.e. the current
2423 instruction) it is not actually part of the trace. To be able to step
2424 over this instruction, we need at least one more function segment. */
2425 if ((it->index == length) && (length > 1))
23a7fe75 2426 {
f158f208
TW
2427 if (btrace_ends_with_single_insn (it->btinfo))
2428 it->index = length - 2;
2429 else
2430 it->index = length - 1;
23a7fe75 2431
f158f208
TW
2432 steps = 1;
2433 stride -= 1;
23a7fe75
MM
2434 }
2435
f158f208
TW
2436 stride = std::min (stride, it->index);
2437
2438 it->index -= stride;
2439 return steps + stride;
23a7fe75
MM
2440}
2441
2442/* See btrace.h. */
2443
2444int
2445btrace_call_cmp (const struct btrace_call_iterator *lhs,
2446 const struct btrace_call_iterator *rhs)
2447{
f158f208
TW
2448 gdb_assert (lhs->btinfo == rhs->btinfo);
2449 return (int) (lhs->index - rhs->index);
23a7fe75
MM
2450}
2451
2452/* See btrace.h. */
2453
2454int
2455btrace_find_call_by_number (struct btrace_call_iterator *it,
2456 const struct btrace_thread_info *btinfo,
2457 unsigned int number)
2458{
f158f208 2459 const unsigned int length = btinfo->functions.size ();
23a7fe75 2460
f158f208
TW
2461 if ((number == 0) || (number > length))
2462 return 0;
23a7fe75 2463
f158f208
TW
2464 it->btinfo = btinfo;
2465 it->index = number - 1;
2466 return 1;
23a7fe75
MM
2467}
2468
2469/* See btrace.h. */
2470
2471void
2472btrace_set_insn_history (struct btrace_thread_info *btinfo,
2473 const struct btrace_insn_iterator *begin,
2474 const struct btrace_insn_iterator *end)
2475{
2476 if (btinfo->insn_history == NULL)
8d749320 2477 btinfo->insn_history = XCNEW (struct btrace_insn_history);
23a7fe75
MM
2478
2479 btinfo->insn_history->begin = *begin;
2480 btinfo->insn_history->end = *end;
2481}
2482
2483/* See btrace.h. */
2484
2485void
2486btrace_set_call_history (struct btrace_thread_info *btinfo,
2487 const struct btrace_call_iterator *begin,
2488 const struct btrace_call_iterator *end)
2489{
2490 gdb_assert (begin->btinfo == end->btinfo);
2491
2492 if (btinfo->call_history == NULL)
8d749320 2493 btinfo->call_history = XCNEW (struct btrace_call_history);
23a7fe75
MM
2494
2495 btinfo->call_history->begin = *begin;
2496 btinfo->call_history->end = *end;
2497}
07bbe694
MM
2498
2499/* See btrace.h. */
2500
2501int
2502btrace_is_replaying (struct thread_info *tp)
2503{
2504 return tp->btrace.replay != NULL;
2505}
6e07b1d2
MM
2506
2507/* See btrace.h. */
2508
2509int
2510btrace_is_empty (struct thread_info *tp)
2511{
2512 struct btrace_insn_iterator begin, end;
2513 struct btrace_thread_info *btinfo;
2514
2515 btinfo = &tp->btrace;
2516
b54b03bd 2517 if (btinfo->functions.empty ())
6e07b1d2
MM
2518 return 1;
2519
2520 btrace_insn_begin (&begin, btinfo);
2521 btrace_insn_end (&end, btinfo);
2522
2523 return btrace_insn_cmp (&begin, &end) == 0;
2524}
734b0e4b 2525
b0627500
MM
2526#if defined (HAVE_LIBIPT)
2527
2528/* Print a single packet. */
2529
2530static void
2531pt_print_packet (const struct pt_packet *packet)
2532{
2533 switch (packet->type)
2534 {
2535 default:
6cb06a8c 2536 gdb_printf (("[??: %x]"), packet->type);
b0627500
MM
2537 break;
2538
2539 case ppt_psb:
6cb06a8c 2540 gdb_printf (("psb"));
b0627500
MM
2541 break;
2542
2543 case ppt_psbend:
6cb06a8c 2544 gdb_printf (("psbend"));
b0627500
MM
2545 break;
2546
2547 case ppt_pad:
6cb06a8c 2548 gdb_printf (("pad"));
b0627500
MM
2549 break;
2550
2551 case ppt_tip:
6cb06a8c
TT
2552 gdb_printf (("tip %u: 0x%" PRIx64 ""),
2553 packet->payload.ip.ipc,
2554 packet->payload.ip.ip);
b0627500
MM
2555 break;
2556
2557 case ppt_tip_pge:
6cb06a8c
TT
2558 gdb_printf (("tip.pge %u: 0x%" PRIx64 ""),
2559 packet->payload.ip.ipc,
2560 packet->payload.ip.ip);
b0627500
MM
2561 break;
2562
2563 case ppt_tip_pgd:
6cb06a8c
TT
2564 gdb_printf (("tip.pgd %u: 0x%" PRIx64 ""),
2565 packet->payload.ip.ipc,
2566 packet->payload.ip.ip);
b0627500
MM
2567 break;
2568
2569 case ppt_fup:
6cb06a8c
TT
2570 gdb_printf (("fup %u: 0x%" PRIx64 ""),
2571 packet->payload.ip.ipc,
2572 packet->payload.ip.ip);
b0627500
MM
2573 break;
2574
2575 case ppt_tnt_8:
6cb06a8c
TT
2576 gdb_printf (("tnt-8 %u: 0x%" PRIx64 ""),
2577 packet->payload.tnt.bit_size,
2578 packet->payload.tnt.payload);
b0627500
MM
2579 break;
2580
2581 case ppt_tnt_64:
6cb06a8c
TT
2582 gdb_printf (("tnt-64 %u: 0x%" PRIx64 ""),
2583 packet->payload.tnt.bit_size,
2584 packet->payload.tnt.payload);
b0627500
MM
2585 break;
2586
2587 case ppt_pip:
6cb06a8c
TT
2588 gdb_printf (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2589 packet->payload.pip.nr ? (" nr") : (""));
b0627500
MM
2590 break;
2591
2592 case ppt_tsc:
6cb06a8c 2593 gdb_printf (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
b0627500
MM
2594 break;
2595
2596 case ppt_cbr:
6cb06a8c 2597 gdb_printf (("cbr %u"), packet->payload.cbr.ratio);
b0627500
MM
2598 break;
2599
2600 case ppt_mode:
2601 switch (packet->payload.mode.leaf)
2602 {
2603 default:
6cb06a8c 2604 gdb_printf (("mode %u"), packet->payload.mode.leaf);
b0627500
MM
2605 break;
2606
2607 case pt_mol_exec:
6cb06a8c
TT
2608 gdb_printf (("mode.exec%s%s"),
2609 packet->payload.mode.bits.exec.csl
2610 ? (" cs.l") : (""),
2611 packet->payload.mode.bits.exec.csd
2612 ? (" cs.d") : (""));
b0627500
MM
2613 break;
2614
2615 case pt_mol_tsx:
6cb06a8c
TT
2616 gdb_printf (("mode.tsx%s%s"),
2617 packet->payload.mode.bits.tsx.intx
2618 ? (" intx") : (""),
2619 packet->payload.mode.bits.tsx.abrt
2620 ? (" abrt") : (""));
b0627500
MM
2621 break;
2622 }
2623 break;
2624
2625 case ppt_ovf:
6cb06a8c 2626 gdb_printf (("ovf"));
b0627500
MM
2627 break;
2628
37fdfe4c 2629 case ppt_stop:
6cb06a8c 2630 gdb_printf (("stop"));
37fdfe4c
MM
2631 break;
2632
2633 case ppt_vmcs:
6cb06a8c 2634 gdb_printf (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
37fdfe4c
MM
2635 break;
2636
2637 case ppt_tma:
6cb06a8c
TT
2638 gdb_printf (("tma %x %x"), packet->payload.tma.ctc,
2639 packet->payload.tma.fc);
37fdfe4c
MM
2640 break;
2641
2642 case ppt_mtc:
6cb06a8c 2643 gdb_printf (("mtc %x"), packet->payload.mtc.ctc);
37fdfe4c
MM
2644 break;
2645
2646 case ppt_cyc:
6cb06a8c 2647 gdb_printf (("cyc %" PRIx64 ""), packet->payload.cyc.value);
37fdfe4c
MM
2648 break;
2649
2650 case ppt_mnt:
6cb06a8c 2651 gdb_printf (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
37fdfe4c 2652 break;
b0627500
MM
2653 }
2654}
2655
2656/* Decode packets into MAINT using DECODER. */
2657
2658static void
2659btrace_maint_decode_pt (struct btrace_maint_info *maint,
2660 struct pt_packet_decoder *decoder)
2661{
2662 int errcode;
2663
554ac434 2664 if (maint->variant.pt.packets == NULL)
a8b3b8e9 2665 maint->variant.pt.packets = new std::vector<btrace_pt_packet>;
554ac434 2666
b0627500
MM
2667 for (;;)
2668 {
2669 struct btrace_pt_packet packet;
2670
2671 errcode = pt_pkt_sync_forward (decoder);
2672 if (errcode < 0)
2673 break;
2674
2675 for (;;)
2676 {
2677 pt_pkt_get_offset (decoder, &packet.offset);
2678
2679 errcode = pt_pkt_next (decoder, &packet.packet,
2680 sizeof(packet.packet));
2681 if (errcode < 0)
2682 break;
2683
2684 if (!maint_btrace_pt_skip_pad || packet.packet.type != ppt_pad)
2685 {
2686 packet.errcode = pt_errcode (errcode);
554ac434 2687 maint->variant.pt.packets->push_back (packet);
b0627500
MM
2688 }
2689 }
2690
2691 if (errcode == -pte_eos)
2692 break;
2693
2694 packet.errcode = pt_errcode (errcode);
554ac434 2695 maint->variant.pt.packets->push_back (packet);
b0627500
MM
2696
2697 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2698 packet.offset, pt_errstr (packet.errcode));
2699 }
2700
2701 if (errcode != -pte_eos)
bc504a31 2702 warning (_("Failed to synchronize onto the Intel Processor Trace "
b0627500
MM
2703 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2704}
2705
2706/* Update the packet history in BTINFO. */
2707
2708static void
2709btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2710{
b0627500 2711 struct pt_packet_decoder *decoder;
4a4495d6 2712 const struct btrace_cpu *cpu;
b0627500
MM
2713 struct btrace_data_pt *pt;
2714 struct pt_config config;
2715 int errcode;
2716
2717 pt = &btinfo->data.variant.pt;
2718
2719 /* Nothing to do if there is no trace. */
2720 if (pt->size == 0)
2721 return;
2722
2723 memset (&config, 0, sizeof(config));
2724
2725 config.size = sizeof (config);
2726 config.begin = pt->data;
2727 config.end = pt->data + pt->size;
2728
4a4495d6
MM
2729 cpu = record_btrace_get_cpu ();
2730 if (cpu == nullptr)
2731 cpu = &pt->config.cpu;
2732
2733 /* We treat an unknown vendor as 'no errata'. */
2734 if (cpu->vendor != CV_UNKNOWN)
2735 {
2736 config.cpu.vendor = pt_translate_cpu_vendor (cpu->vendor);
2737 config.cpu.family = cpu->family;
2738 config.cpu.model = cpu->model;
2739 config.cpu.stepping = cpu->stepping;
b0627500 2740
4a4495d6
MM
2741 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2742 if (errcode < 0)
2743 error (_("Failed to configure the Intel Processor Trace "
2744 "decoder: %s."), pt_errstr (pt_errcode (errcode)));
2745 }
b0627500
MM
2746
2747 decoder = pt_pkt_alloc_decoder (&config);
2748 if (decoder == NULL)
bc504a31 2749 error (_("Failed to allocate the Intel Processor Trace decoder."));
b0627500 2750
a70b8144 2751 try
b0627500
MM
2752 {
2753 btrace_maint_decode_pt (&btinfo->maint, decoder);
2754 }
230d2906 2755 catch (const gdb_exception &except)
b0627500
MM
2756 {
2757 pt_pkt_free_decoder (decoder);
2758
2759 if (except.reason < 0)
eedc3f4f 2760 throw;
b0627500 2761 }
b0627500
MM
2762
2763 pt_pkt_free_decoder (decoder);
2764}
2765
2766#endif /* defined (HAVE_LIBIPT) */
2767
2768/* Update the packet maintenance information for BTINFO and store the
2769 low and high bounds into BEGIN and END, respectively.
2770 Store the current iterator state into FROM and TO. */
2771
2772static void
2773btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2774 unsigned int *begin, unsigned int *end,
2775 unsigned int *from, unsigned int *to)
2776{
2777 switch (btinfo->data.format)
2778 {
2779 default:
2780 *begin = 0;
2781 *end = 0;
2782 *from = 0;
2783 *to = 0;
2784 break;
2785
2786 case BTRACE_FORMAT_BTS:
2787 /* Nothing to do - we operate directly on BTINFO->DATA. */
2788 *begin = 0;
46f29a9a 2789 *end = btinfo->data.variant.bts.blocks->size ();
b0627500
MM
2790 *from = btinfo->maint.variant.bts.packet_history.begin;
2791 *to = btinfo->maint.variant.bts.packet_history.end;
2792 break;
2793
2794#if defined (HAVE_LIBIPT)
2795 case BTRACE_FORMAT_PT:
554ac434 2796 if (btinfo->maint.variant.pt.packets == nullptr)
a8b3b8e9 2797 btinfo->maint.variant.pt.packets = new std::vector<btrace_pt_packet>;
554ac434
AB
2798
2799 if (btinfo->maint.variant.pt.packets->empty ())
b0627500
MM
2800 btrace_maint_update_pt_packets (btinfo);
2801
2802 *begin = 0;
554ac434 2803 *end = btinfo->maint.variant.pt.packets->size ();
b0627500
MM
2804 *from = btinfo->maint.variant.pt.packet_history.begin;
2805 *to = btinfo->maint.variant.pt.packet_history.end;
2806 break;
2807#endif /* defined (HAVE_LIBIPT) */
2808 }
2809}
2810
2811/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2812 update the current iterator position. */
2813
2814static void
2815btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2816 unsigned int begin, unsigned int end)
2817{
2818 switch (btinfo->data.format)
2819 {
2820 default:
2821 break;
2822
2823 case BTRACE_FORMAT_BTS:
2824 {
a8b3b8e9 2825 const std::vector<btrace_block> &blocks
46f29a9a 2826 = *btinfo->data.variant.bts.blocks;
b0627500
MM
2827 unsigned int blk;
2828
b0627500
MM
2829 for (blk = begin; blk < end; ++blk)
2830 {
46f29a9a 2831 const btrace_block &block = blocks.at (blk);
b0627500 2832
6cb06a8c
TT
2833 gdb_printf ("%u\tbegin: %s, end: %s\n", blk,
2834 core_addr_to_string_nz (block.begin),
2835 core_addr_to_string_nz (block.end));
b0627500
MM
2836 }
2837
2838 btinfo->maint.variant.bts.packet_history.begin = begin;
2839 btinfo->maint.variant.bts.packet_history.end = end;
2840 }
2841 break;
2842
2843#if defined (HAVE_LIBIPT)
2844 case BTRACE_FORMAT_PT:
2845 {
a8b3b8e9 2846 const std::vector<btrace_pt_packet> &packets
554ac434 2847 = *btinfo->maint.variant.pt.packets;
b0627500
MM
2848 unsigned int pkt;
2849
b0627500
MM
2850 for (pkt = begin; pkt < end; ++pkt)
2851 {
554ac434 2852 const struct btrace_pt_packet &packet = packets.at (pkt);
b0627500 2853
6cb06a8c
TT
2854 gdb_printf ("%u\t", pkt);
2855 gdb_printf ("0x%" PRIx64 "\t", packet.offset);
b0627500 2856
554ac434
AB
2857 if (packet.errcode == pte_ok)
2858 pt_print_packet (&packet.packet);
b0627500 2859 else
6cb06a8c 2860 gdb_printf ("[error: %s]", pt_errstr (packet.errcode));
b0627500 2861
6cb06a8c 2862 gdb_printf ("\n");
b0627500
MM
2863 }
2864
2865 btinfo->maint.variant.pt.packet_history.begin = begin;
2866 btinfo->maint.variant.pt.packet_history.end = end;
2867 }
2868 break;
2869#endif /* defined (HAVE_LIBIPT) */
2870 }
2871}
2872
2873/* Read a number from an argument string. */
2874
2875static unsigned int
f938677d 2876get_uint (const char **arg)
b0627500 2877{
f938677d
TT
2878 const char *begin, *pos;
2879 char *end;
b0627500
MM
2880 unsigned long number;
2881
2882 begin = *arg;
2883 pos = skip_spaces (begin);
2884
2885 if (!isdigit (*pos))
2886 error (_("Expected positive number, got: %s."), pos);
2887
2888 number = strtoul (pos, &end, 10);
2889 if (number > UINT_MAX)
2890 error (_("Number too big."));
2891
2892 *arg += (end - begin);
2893
2894 return (unsigned int) number;
2895}
2896
2897/* Read a context size from an argument string. */
2898
2899static int
f938677d 2900get_context_size (const char **arg)
b0627500 2901{
f938677d 2902 const char *pos = skip_spaces (*arg);
b0627500
MM
2903
2904 if (!isdigit (*pos))
2905 error (_("Expected positive number, got: %s."), pos);
2906
f938677d
TT
2907 char *end;
2908 long result = strtol (pos, &end, 10);
2909 *arg = end;
2910 return result;
b0627500
MM
2911}
2912
2913/* Complain about junk at the end of an argument string. */
2914
2915static void
f938677d 2916no_chunk (const char *arg)
b0627500
MM
2917{
2918 if (*arg != 0)
2919 error (_("Junk after argument: %s."), arg);
2920}
2921
2922/* The "maintenance btrace packet-history" command. */
2923
2924static void
f938677d 2925maint_btrace_packet_history_cmd (const char *arg, int from_tty)
b0627500
MM
2926{
2927 struct btrace_thread_info *btinfo;
b0627500
MM
2928 unsigned int size, begin, end, from, to;
2929
3c8af02f 2930 thread_info *tp = current_inferior ()->find_thread (inferior_ptid);
b0627500
MM
2931 if (tp == NULL)
2932 error (_("No thread."));
2933
2934 size = 10;
2935 btinfo = &tp->btrace;
2936
2937 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2938 if (begin == end)
2939 {
6cb06a8c 2940 gdb_printf (_("No trace.\n"));
b0627500
MM
2941 return;
2942 }
2943
2944 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2945 {
2946 from = to;
2947
2948 if (end - from < size)
2949 size = end - from;
2950 to = from + size;
2951 }
2952 else if (strcmp (arg, "-") == 0)
2953 {
2954 to = from;
2955
2956 if (to - begin < size)
2957 size = to - begin;
2958 from = to - size;
2959 }
2960 else
2961 {
2962 from = get_uint (&arg);
2963 if (end <= from)
2964 error (_("'%u' is out of range."), from);
2965
2966 arg = skip_spaces (arg);
2967 if (*arg == ',')
2968 {
2969 arg = skip_spaces (++arg);
2970
2971 if (*arg == '+')
2972 {
2973 arg += 1;
2974 size = get_context_size (&arg);
2975
2976 no_chunk (arg);
2977
2978 if (end - from < size)
2979 size = end - from;
2980 to = from + size;
2981 }
2982 else if (*arg == '-')
2983 {
2984 arg += 1;
2985 size = get_context_size (&arg);
2986
2987 no_chunk (arg);
2988
2989 /* Include the packet given as first argument. */
2990 from += 1;
2991 to = from;
2992
2993 if (to - begin < size)
2994 size = to - begin;
2995 from = to - size;
2996 }
2997 else
2998 {
2999 to = get_uint (&arg);
3000
3001 /* Include the packet at the second argument and silently
3002 truncate the range. */
3003 if (to < end)
3004 to += 1;
3005 else
3006 to = end;
3007
3008 no_chunk (arg);
3009 }
3010 }
3011 else
3012 {
3013 no_chunk (arg);
3014
3015 if (end - from < size)
3016 size = end - from;
3017 to = from + size;
3018 }
3019
3020 dont_repeat ();
3021 }
3022
3023 btrace_maint_print_packets (btinfo, from, to);
3024}
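
/* A toy sketch of the windowing arithmetic above for the argument-less,
   "+", and "-" forms.  BEGIN/END bound the valid packet numbers;
   FROM/TO is the half-open window of the previous print (with
   BEGIN <= FROM <= TO <= END); SIZE is the default context of ten
   packets.  */

static void
sketch_shift_window (unsigned int begin, unsigned int end,
		     unsigned int &from, unsigned int &to,
		     unsigned int size, bool forward)
{
  if (forward)
    {
      /* Continue after the previous print, clipped at END.  */
      from = to;
      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else
    {
      /* Back up before the previous print, clipped at BEGIN.  */
      to = from;
      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
}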
3025
3026/* The "maintenance btrace clear-packet-history" command. */
3027
3028static void
f938677d 3029maint_btrace_clear_packet_history_cmd (const char *args, int from_tty)
b0627500 3030{
b0627500
MM
3031 if (args != NULL && *args != 0)
3032 error (_("Invalid argument."));
3033
00431a78 3034 if (inferior_ptid == null_ptid)
b0627500
MM
3035 error (_("No thread."));
3036
00431a78
PA
3037 thread_info *tp = inferior_thread ();
3038 btrace_thread_info *btinfo = &tp->btrace;
b0627500
MM
3039
3040 /* Must clear the maint data first - it depends on BTINFO->DATA. */
3041 btrace_maint_clear (btinfo);
8dcc53b3 3042 btinfo->data.clear ();
b0627500
MM
3043}
3044
3045/* The "maintenance btrace clear" command. */
3046
3047static void
f938677d 3048maint_btrace_clear_cmd (const char *args, int from_tty)
b0627500 3049{
b0627500
MM
3050 if (args != NULL && *args != 0)
3051 error (_("Invalid argument."));
3052
00431a78 3053 if (inferior_ptid == null_ptid)
b0627500
MM
3054 error (_("No thread."));
3055
00431a78 3056 thread_info *tp = inferior_thread ();
b0627500
MM
3057 btrace_clear (tp);
3058}
3059
b0627500
MM
3060/* The "maintenance info btrace" command. */
3061
3062static void
f938677d 3063maint_info_btrace_cmd (const char *args, int from_tty)
b0627500
MM
3064{
3065 struct btrace_thread_info *btinfo;
b0627500
MM
3066 const struct btrace_config *conf;
3067
3068 if (args != NULL && *args != 0)
3069 error (_("Invalid argument."));
3070
00431a78 3071 if (inferior_ptid == null_ptid)
b0627500
MM
3072 error (_("No thread."));
3073
00431a78
PA
3074 thread_info *tp = inferior_thread ();
3075
b0627500
MM
3076 btinfo = &tp->btrace;
3077
3078 conf = btrace_conf (btinfo);
3079 if (conf == NULL)
3080 error (_("No btrace configuration."));
3081
6cb06a8c
TT
3082 gdb_printf (_("Format: %s.\n"),
3083 btrace_format_string (conf->format));
b0627500
MM
3084
3085 switch (conf->format)
3086 {
3087 default:
3088 break;
3089
3090 case BTRACE_FORMAT_BTS:
6cb06a8c
TT
3091 gdb_printf (_("Number of packets: %zu.\n"),
3092 btinfo->data.variant.bts.blocks->size ());
b0627500
MM
3093 break;
3094
3095#if defined (HAVE_LIBIPT)
3096 case BTRACE_FORMAT_PT:
3097 {
3098 struct pt_version version;
3099
3100 version = pt_library_version ();
6cb06a8c
TT
3101 gdb_printf (_("Version: %u.%u.%u%s.\n"), version.major,
3102 version.minor, version.build,
3103 version.ext != NULL ? version.ext : "");
b0627500
MM
3104
3105 btrace_maint_update_pt_packets (btinfo);
6cb06a8c
TT
3106 gdb_printf (_("Number of packets: %zu.\n"),
3107 ((btinfo->maint.variant.pt.packets == nullptr)
3108 ? 0 : btinfo->maint.variant.pt.packets->size ()));
b0627500
MM
3109 }
3110 break;
3111#endif /* defined (HAVE_LIBIPT) */
3112 }
3113}
3114
3115/* The "maint show btrace pt skip-pad" show value function. */
3116
3117static void
3118show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
3119 struct cmd_list_element *c,
3120 const char *value)
3121{
6cb06a8c 3122 gdb_printf (file, _("Skip PAD packets is %s.\n"), value);
b0627500
MM
3123}
3124
3125
3126/* Initialize btrace maintenance commands. */
3127
6c265988 3128void _initialize_btrace ();
b0627500 3129void
6c265988 3130_initialize_btrace ()
b0627500
MM
3131{
3132 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3133 _("Info about branch tracing data."), &maintenanceinfolist);
3134
0743fc83
TT
3135 add_basic_prefix_cmd ("btrace", class_maintenance,
3136 _("Branch tracing maintenance commands."),
2f822da5 3137 &maint_btrace_cmdlist, 0, &maintenancelist);
b0627500 3138
f54bdb6d
SM
3139 add_setshow_prefix_cmd ("btrace", class_maintenance,
3140 _("Set branch tracing specific variables."),
3141 _("Show branch tracing specific variables."),
3142 &maint_btrace_set_cmdlist,
3143 &maint_btrace_show_cmdlist,
3144 &maintenance_set_cmdlist,
3145 &maintenance_show_cmdlist);
3146
3147 add_setshow_prefix_cmd ("pt", class_maintenance,
3148 _("Set Intel Processor Trace specific variables."),
3149 _("Show Intel Processor Trace specific variables."),
3150 &maint_btrace_pt_set_cmdlist,
3151 &maint_btrace_pt_show_cmdlist,
3152 &maint_btrace_set_cmdlist,
3153 &maint_btrace_show_cmdlist);
b0627500
MM
3154
3155 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3156 &maint_btrace_pt_skip_pad, _("\
3157Set whether PAD packets should be skipped in the btrace packet history."), _("\
3158Show whether PAD packets should be skipped in the btrace packet history."),_("\
3159When enabled, PAD packets are ignored in the btrace packet history."),
3160 NULL, show_maint_btrace_pt_skip_pad,
3161 &maint_btrace_pt_set_cmdlist,
3162 &maint_btrace_pt_show_cmdlist);
3163
3164 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3165 _("Print the raw branch tracing data.\n\
3166With no argument, print ten more packets after the previous ten-line print.\n\
3167With '-' as argument, print the ten packets before the previous ten-line print.\n\
3168One argument specifies the starting packet of a ten-line print.\n\
3169Two arguments separated by a comma specify the starting and ending packets \
3170to print.\n\
3171Preceded by '+'/'-', the second argument specifies the distance from the \
89549d7f 3172first."),
b0627500
MM
3173 &maint_btrace_cmdlist);
3174
3175 add_cmd ("clear-packet-history", class_maintenance,
3176 maint_btrace_clear_packet_history_cmd,
3177 _("Clears the branch tracing packet history.\n\
89549d7f 3178Discards the raw branch tracing data but not the execution history data."),
b0627500
MM
3179 &maint_btrace_cmdlist);
3180
3181 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3182 _("Clears the branch tracing data.\n\
3183Discards the raw branch tracing data and the execution history data.\n\
89549d7f 3184The next 'record' command will fetch the branch tracing data anew."),
b0627500
MM
3185 &maint_btrace_cmdlist);
3186
3187}