]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/btrace.c
Automatic date update in version.in
[thirdparty/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
b20a6524 34#include "rsp-low.h"
b0627500
MM
35#include "gdbcmd.h"
36#include "cli/cli-utils.h"
b20a6524
MM
37
38#include <inttypes.h>
b0627500 39#include <ctype.h>
325fac50 40#include <algorithm>
b0627500
MM
41
42/* Command lists for btrace maintenance commands. */
43static struct cmd_list_element *maint_btrace_cmdlist;
44static struct cmd_list_element *maint_btrace_set_cmdlist;
45static struct cmd_list_element *maint_btrace_show_cmdlist;
46static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
47static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
48
49/* Control whether to skip PAD packets when computing the packet history. */
50static int maint_btrace_pt_skip_pad = 1;
b20a6524
MM
51
52static void btrace_add_pc (struct thread_info *tp);
02d27625
MM
53
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

/* Like DEBUG, but additionally tagged as a function-trace message.  */
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
67
02d27625
MM
68/* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
70
71static const char *
23a7fe75 72ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
73{
74 struct minimal_symbol *msym;
75 struct symbol *sym;
76
77 msym = bfun->msym;
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 return SYMBOL_PRINT_NAME (sym);
82
83 if (msym != NULL)
efd66ac6 84 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
85
86 return "<unknown>";
87}
88
89/* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
91
92static const char *
23a7fe75 93ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
94{
95 struct symbol *sym;
96 const char *filename;
97
98 sym = bfun->sym;
99
100 if (sym != NULL)
08be3fe3 101 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
102 else
103 filename = "<unknown>";
104
105 return filename;
106}
107
23a7fe75
MM
108/* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
02d27625 110
23a7fe75
MM
111static const char *
112ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 113{
23a7fe75
MM
114 if (insn == NULL)
115 return "<nil>";
116
117 return core_addr_to_string_nz (insn->pc);
02d27625
MM
118}
119
23a7fe75 120/* Print an ftrace debug status message. */
02d27625
MM
121
122static void
23a7fe75 123ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 124{
23a7fe75
MM
125 const char *fun, *file;
126 unsigned int ibegin, iend;
ce0dfbea 127 int level;
23a7fe75
MM
128
129 fun = ftrace_print_function_name (bfun);
130 file = ftrace_print_filename (bfun);
131 level = bfun->level;
132
23a7fe75
MM
133 ibegin = bfun->insn_offset;
134 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
135
ce0dfbea
MM
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix, fun, file, level, ibegin, iend);
02d27625
MM
138}
139
69090cee
TW
140/* Return the number of instructions in a given function call segment. */
141
142static unsigned int
143ftrace_call_num_insn (const struct btrace_function* bfun)
144{
145 if (bfun == NULL)
146 return 0;
147
148 /* A gap is always counted as one instruction. */
149 if (bfun->errcode != 0)
150 return 1;
151
152 return VEC_length (btrace_insn_s, bfun->insn);
153}
154
42bfe59e
TW
155/* Return the function segment with the given NUMBER or NULL if no such segment
156 exists. BTINFO is the branch trace information for the current thread. */
157
158static struct btrace_function *
08c3f6d2
TW
159ftrace_find_call_by_number (struct btrace_thread_info *btinfo,
160 unsigned int number)
161{
162 if (number == 0 || number > btinfo->functions.size ())
163 return NULL;
164
165 return &btinfo->functions[number - 1];
166}
167
168/* A const version of the function above. */
169
170static const struct btrace_function *
42bfe59e
TW
171ftrace_find_call_by_number (const struct btrace_thread_info *btinfo,
172 unsigned int number)
173{
174 if (number == 0 || number > btinfo->functions.size ())
175 return NULL;
176
08c3f6d2 177 return &btinfo->functions[number - 1];
42bfe59e
TW
178}
179
23a7fe75
MM
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  Two distinct
	 static functions may share a name; the file disambiguates.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}
225
8286623c
TW
226/* Allocate and initialize a new branch trace function segment at the end of
227 the trace.
17b89b34 228 BTINFO is the branch trace information for the current thread.
08c3f6d2
TW
229 MFUN and FUN are the symbol information we have for this function.
230 This invalidates all struct btrace_function pointer currently held. */
23a7fe75
MM
231
232static struct btrace_function *
17b89b34 233ftrace_new_function (struct btrace_thread_info *btinfo,
23a7fe75
MM
234 struct minimal_symbol *mfun,
235 struct symbol *fun)
236{
08c3f6d2
TW
237 int level;
238 unsigned int number, insn_offset;
23a7fe75 239
b54b03bd 240 if (btinfo->functions.empty ())
5de9129b 241 {
08c3f6d2
TW
242 /* Start counting NUMBER and INSN_OFFSET at one. */
243 level = 0;
244 number = 1;
245 insn_offset = 1;
5de9129b
MM
246 }
247 else
23a7fe75 248 {
08c3f6d2
TW
249 const struct btrace_function *prev = &btinfo->functions.back ();
250 level = prev->level;
251 number = prev->number + 1;
252 insn_offset = prev->insn_offset + ftrace_call_num_insn (prev);
23a7fe75
MM
253 }
254
08c3f6d2
TW
255 btinfo->functions.emplace_back (mfun, fun, number, insn_offset, level);
256 return &btinfo->functions.back ();
02d27625
MM
257}
258
23a7fe75 259/* Update the UP field of a function segment. */
02d27625 260
23a7fe75
MM
261static void
262ftrace_update_caller (struct btrace_function *bfun,
263 struct btrace_function *caller,
264 enum btrace_function_flag flags)
02d27625 265{
42bfe59e 266 if (bfun->up != 0)
23a7fe75 267 ftrace_debug (bfun, "updating caller");
02d27625 268
42bfe59e 269 bfun->up = caller->number;
23a7fe75
MM
270 bfun->flags = flags;
271
272 ftrace_debug (bfun, "set caller");
d87fdac3 273 ftrace_debug (caller, "..to");
23a7fe75
MM
274}
275
/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_thread_info *btinfo,
		     struct btrace_function *bfun,
		     struct btrace_function *caller,
		     enum btrace_function_flag flags)
{
  unsigned int prev, next;

  /* Remember BFUN's neighbors before updating it; BFUN itself is reused
     as the iteration variable in the loops below.  */
  prev = bfun->prev;
  next = bfun->next;
  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (; prev != 0; prev = bfun->prev)
    {
      bfun = ftrace_find_call_by_number (btinfo, prev);
      ftrace_update_caller (bfun, caller, flags);
    }

  for (; next != 0; next = bfun->next)
    {
      bfun = ftrace_find_call_by_number (btinfo, next);
      ftrace_update_caller (bfun, caller, flags);
    }
}
303
8286623c 304/* Add a new function segment for a call at the end of the trace.
17b89b34 305 BTINFO is the branch trace information for the current thread.
23a7fe75
MM
306 MFUN and FUN are the symbol information we have for this function. */
307
308static struct btrace_function *
17b89b34 309ftrace_new_call (struct btrace_thread_info *btinfo,
23a7fe75
MM
310 struct minimal_symbol *mfun,
311 struct symbol *fun)
312{
b54b03bd 313 const unsigned int length = btinfo->functions.size ();
8286623c 314 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
23a7fe75 315
42bfe59e 316 bfun->up = length;
31fd9caa 317 bfun->level += 1;
23a7fe75
MM
318
319 ftrace_debug (bfun, "new call");
320
321 return bfun;
322}
323
8286623c 324/* Add a new function segment for a tail call at the end of the trace.
17b89b34 325 BTINFO is the branch trace information for the current thread.
23a7fe75
MM
326 MFUN and FUN are the symbol information we have for this function. */
327
328static struct btrace_function *
17b89b34 329ftrace_new_tailcall (struct btrace_thread_info *btinfo,
23a7fe75
MM
330 struct minimal_symbol *mfun,
331 struct symbol *fun)
332{
b54b03bd 333 const unsigned int length = btinfo->functions.size ();
8286623c 334 struct btrace_function *bfun = ftrace_new_function (btinfo, mfun, fun);
02d27625 335
42bfe59e 336 bfun->up = length;
31fd9caa 337 bfun->level += 1;
23a7fe75 338 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 339
23a7fe75
MM
340 ftrace_debug (bfun, "new tail call");
341
342 return bfun;
343}
344
d87fdac3 345/* Return the caller of BFUN or NULL if there is none. This function skips
42bfe59e
TW
346 tail calls in the call chain. BTINFO is the branch trace information for
347 the current thread. */
d87fdac3 348static struct btrace_function *
42bfe59e
TW
349ftrace_get_caller (struct btrace_thread_info *btinfo,
350 struct btrace_function *bfun)
d87fdac3 351{
42bfe59e 352 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
d87fdac3 353 if ((bfun->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
42bfe59e 354 return ftrace_find_call_by_number (btinfo, bfun->up);
d87fdac3
MM
355
356 return NULL;
357}
358
23a7fe75 359/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
42bfe59e
TW
360 symbol information. BTINFO is the branch trace information for the current
361 thread. */
23a7fe75
MM
362
363static struct btrace_function *
42bfe59e
TW
364ftrace_find_caller (struct btrace_thread_info *btinfo,
365 struct btrace_function *bfun,
23a7fe75
MM
366 struct minimal_symbol *mfun,
367 struct symbol *fun)
368{
42bfe59e 369 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
23a7fe75
MM
370 {
371 /* Skip functions with incompatible symbol information. */
372 if (ftrace_function_switched (bfun, mfun, fun))
373 continue;
374
375 /* This is the function segment we're looking for. */
376 break;
377 }
378
379 return bfun;
380}
381
382/* Find the innermost caller in the back trace of BFUN, skipping all
383 function segments that do not end with a call instruction (e.g.
42bfe59e
TW
384 tail calls ending with a jump). BTINFO is the branch trace information for
385 the current thread. */
23a7fe75
MM
386
387static struct btrace_function *
42bfe59e
TW
388ftrace_find_call (struct btrace_thread_info *btinfo,
389 struct btrace_function *bfun)
23a7fe75 390{
42bfe59e 391 for (; bfun != NULL; bfun = ftrace_find_call_by_number (btinfo, bfun->up))
02d27625 392 {
23a7fe75 393 struct btrace_insn *last;
02d27625 394
31fd9caa
MM
395 /* Skip gaps. */
396 if (bfun->errcode != 0)
397 continue;
23a7fe75
MM
398
399 last = VEC_last (btrace_insn_s, bfun->insn);
02d27625 400
7d5c24b3 401 if (last->iclass == BTRACE_INSN_CALL)
23a7fe75
MM
402 break;
403 }
404
405 return bfun;
406}
407
8286623c
TW
/* Add a continuation segment for a function into which we return at the end of
   the trace.
   BTINFO is the branch trace information for the current thread.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_thread_info *btinfo,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *prev, *bfun, *caller;

  bfun = ftrace_new_function (btinfo, mfun, fun);
  /* PREV is the segment that chronologically precedes the new one.  */
  prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_call_by_number (btinfo, prev->up);
  caller = ftrace_find_caller (btinfo, caller, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->next == 0);

      /* Link the two segments of the same function instance.  */
      caller->next = bfun->number;
      bfun->prev = caller->number;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call_by_number (btinfo, prev->up);
      caller = ftrace_find_call (btinfo, caller);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost function and add a new caller for it.
	     This should handle a series of initial tail calls.  */
	  while (prev->up != 0)
	    prev = ftrace_find_call_by_number (btinfo, prev->up);

	  /* The new segment becomes the caller, one level above the
	     topmost known function.  */
	  bfun->level = prev->level - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (btinfo, prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned but didn't.  Let's start a new, separate back trace
	     from PREV's level.  */
	  bfun->level = prev->level - 1;

	  /* We fix up the back trace for PREV but leave other function segments
	     on the same level as they are.
	     This should handle things like schedule () correctly where we're
	     switching contexts.  */
	  prev->up = bfun->number;
	  prev->flags = BFUN_UP_LINKS_TO_RET;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
490
8286623c 491/* Add a new function segment for a function switch at the end of the trace.
17b89b34 492 BTINFO is the branch trace information for the current thread.
23a7fe75
MM
493 MFUN and FUN are the symbol information we have for this function. */
494
495static struct btrace_function *
17b89b34 496ftrace_new_switch (struct btrace_thread_info *btinfo,
23a7fe75
MM
497 struct minimal_symbol *mfun,
498 struct symbol *fun)
499{
08c3f6d2 500 struct btrace_function *prev, *bfun;
23a7fe75 501
4c2c7ac6
MM
502 /* This is an unexplained function switch. We can't really be sure about the
503 call stack, yet the best I can think of right now is to preserve it. */
8286623c 504 bfun = ftrace_new_function (btinfo, mfun, fun);
08c3f6d2 505 prev = ftrace_find_call_by_number (btinfo, bfun->number - 1);
4c2c7ac6
MM
506 bfun->up = prev->up;
507 bfun->flags = prev->flags;
02d27625 508
23a7fe75
MM
509 ftrace_debug (bfun, "new switch");
510
511 return bfun;
512}
513
8286623c
TW
514/* Add a new function segment for a gap in the trace due to a decode error at
515 the end of the trace.
17b89b34 516 BTINFO is the branch trace information for the current thread.
31fd9caa
MM
517 ERRCODE is the format-specific error code. */
518
519static struct btrace_function *
8ffd39f2
TW
520ftrace_new_gap (struct btrace_thread_info *btinfo, int errcode,
521 std::vector<unsigned int> &gaps)
31fd9caa
MM
522{
523 struct btrace_function *bfun;
524
b54b03bd 525 if (btinfo->functions.empty ())
8286623c 526 bfun = ftrace_new_function (btinfo, NULL, NULL);
b54b03bd
TW
527 else
528 {
529 /* We hijack the previous function segment if it was empty. */
08c3f6d2 530 bfun = &btinfo->functions.back ();
b54b03bd
TW
531 if (bfun->errcode != 0 || !VEC_empty (btrace_insn_s, bfun->insn))
532 bfun = ftrace_new_function (btinfo, NULL, NULL);
533 }
31fd9caa
MM
534
535 bfun->errcode = errcode;
8ffd39f2 536 gaps.push_back (bfun->number);
31fd9caa
MM
537
538 ftrace_debug (bfun, "new gap");
539
540 return bfun;
541}
542
8286623c
TW
/* Update the current function segment at the end of the trace in BTINFO with
   respect to the instruction at PC.  This may create new function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_thread_info *btinfo, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;
  struct btrace_function *bfun;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function, we create one.  */
  if (btinfo->functions.empty ())
    return ftrace_new_function (btinfo, mfun, fun);

  /* If we had a gap before, we create a function.  */
  bfun = &btinfo->functions.back ();
  if (bfun->errcode != 0)
    return ftrace_new_function (btinfo, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    return ftrace_new_return (btinfo, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (btinfo, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* A jump to the start of a function is (typically) a tail call.  */
	    if (start == pc)
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call if we're switching functions
	       and as an intra-function branch if we don't.  */
	    if (start == 0 && ftrace_function_switched (bfun, mfun, fun))
	      return ftrace_new_tailcall (btinfo, mfun, fun);

	    break;
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (btinfo, mfun, fun);
    }

  return bfun;
}
647
23a7fe75
MM
/* Append the instruction INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  /* Per-instruction tracing only at the higher debug verbosity.  */
  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
659
7d5c24b3
MM
/* Classify the instruction at PC.  Returns BTRACE_INSN_OTHER if the
   instruction cannot be read or classified.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* Errors while reading/decoding are deliberately ignored; the
	 instruction keeps the default classification.  */
    }
  END_CATCH

  return iclass;
}
684
d87fdac3
MM
685/* Try to match the back trace at LHS to the back trace at RHS. Returns the
686 number of matching function segments or zero if the back traces do not
42bfe59e 687 match. BTINFO is the branch trace information for the current thread. */
d87fdac3
MM
688
689static int
42bfe59e
TW
690ftrace_match_backtrace (struct btrace_thread_info *btinfo,
691 struct btrace_function *lhs,
d87fdac3
MM
692 struct btrace_function *rhs)
693{
694 int matches;
695
696 for (matches = 0; lhs != NULL && rhs != NULL; ++matches)
697 {
698 if (ftrace_function_switched (lhs, rhs->msym, rhs->sym))
699 return 0;
700
42bfe59e
TW
701 lhs = ftrace_get_caller (btinfo, lhs);
702 rhs = ftrace_get_caller (btinfo, rhs);
d87fdac3
MM
703 }
704
705 return matches;
706}
707
eb8f2b9c
TW
708/* Add ADJUSTMENT to the level of BFUN and succeeding function segments.
709 BTINFO is the branch trace information for the current thread. */
d87fdac3
MM
710
711static void
eb8f2b9c
TW
712ftrace_fixup_level (struct btrace_thread_info *btinfo,
713 struct btrace_function *bfun, int adjustment)
d87fdac3
MM
714{
715 if (adjustment == 0)
716 return;
717
718 DEBUG_FTRACE ("fixup level (%+d)", adjustment);
719 ftrace_debug (bfun, "..bfun");
720
eb8f2b9c
TW
721 while (bfun != NULL)
722 {
723 bfun->level += adjustment;
724 bfun = ftrace_find_call_by_number (btinfo, bfun->number + 1);
725 }
d87fdac3
MM
726}
727
/* Recompute the global level offset.  Traverse the function trace and compute
   the global level offset as the negative of the minimal function level.  */

static void
ftrace_compute_global_level_offset (struct btrace_thread_info *btinfo)
{
  int level = INT_MAX;

  if (btinfo == NULL)
    return;

  if (btinfo->functions.empty ())
    return;

  /* Consider all but the last function segment; the last one is treated
     specially below.  */
  unsigned int length = btinfo->functions.size() - 1;
  for (unsigned int i = 0; i < length; ++i)
    level = std::min (level, btinfo->functions[i].level);

  /* The last function segment contains the current instruction, which is not
     really part of the trace.  If it contains just this one instruction, we
     ignore the segment.  */
  struct btrace_function *last = &btinfo->functions.back();
  if (VEC_length (btrace_insn_s, last->insn) != 1)
    level = std::min (level, last->level);

  DEBUG_FTRACE ("setting global level offset: %d", -level);
  btinfo->level = -level;
}
756
/* Connect the function segments PREV and NEXT in a bottom-to-top walk as in
   ftrace_connect_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_bfun (struct btrace_thread_info *btinfo,
		     struct btrace_function *prev,
		     struct btrace_function *next)
{
  DEBUG_FTRACE ("connecting...");
  ftrace_debug (prev, "..prev");
  ftrace_debug (next, "..next");

  /* The function segments are not yet connected.  */
  gdb_assert (prev->next == 0);
  gdb_assert (next->prev == 0);

  /* Link the two segments as parts of the same function instance.  */
  prev->next = next->number;
  next->prev = prev->number;

  /* We may have moved NEXT to a different function level.  */
  ftrace_fixup_level (btinfo, next, prev->level - next->level);

  /* If we run out of back trace for one, let's use the other's.  */
  if (prev->up == 0)
    {
      const btrace_function_flags flags = next->flags;

      next = ftrace_find_call_by_number (btinfo, next->up);
      if (next != NULL)
	{
	  DEBUG_FTRACE ("using next's callers");
	  ftrace_fixup_caller (btinfo, prev, next, flags);
	}
    }
  else if (next->up == 0)
    {
      const btrace_function_flags flags = prev->flags;

      prev = ftrace_find_call_by_number (btinfo, prev->up);
      if (prev != NULL)
	{
	  DEBUG_FTRACE ("using prev's callers");
	  ftrace_fixup_caller (btinfo, next, prev, flags);
	}
    }
  else
    {
      /* PREV may have a tailcall caller, NEXT can't.  If it does, fixup the up
	 link to add the tail callers to NEXT's back trace.

	 This removes NEXT->UP from NEXT's back trace.  It will be added back
	 when connecting NEXT and PREV's callers - provided they exist.

	 If PREV's back trace consists of a series of tail calls without an
	 actual call, there will be no further connection and NEXT's caller will
	 be removed for good.  To catch this case, we handle it here and connect
	 the top of PREV's back trace to NEXT's caller.  */
      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	{
	  struct btrace_function *caller;
	  btrace_function_flags next_flags, prev_flags;

	  /* We checked NEXT->UP above so CALLER can't be NULL.  */
	  caller = ftrace_find_call_by_number (btinfo, next->up);
	  next_flags = next->flags;
	  prev_flags = prev->flags;

	  DEBUG_FTRACE ("adding prev's tail calls to next");

	  prev = ftrace_find_call_by_number (btinfo, prev->up);
	  ftrace_fixup_caller (btinfo, next, prev, prev_flags);

	  /* Walk up PREV's chain of tail callers.  */
	  for (; prev != NULL; prev = ftrace_find_call_by_number (btinfo,
								  prev->up))
	    {
	      /* At the end of PREV's back trace, continue with CALLER.  */
	      if (prev->up == 0)
		{
		  DEBUG_FTRACE ("fixing up link for tailcall chain");
		  ftrace_debug (prev, "..top");
		  ftrace_debug (caller, "..up");

		  ftrace_fixup_caller (btinfo, prev, caller, next_flags);

		  /* If we skipped any tail calls, this may move CALLER to a
		     different function level.

		     Note that changing CALLER's level is only OK because we
		     know that this is the last iteration of the bottom-to-top
		     walk in ftrace_connect_backtrace.

		     Otherwise we will fix up CALLER's level when we connect it
		     to PREV's caller in the next iteration.  */
		  ftrace_fixup_level (btinfo, caller,
				      prev->level - caller->level - 1);
		  break;
		}

	      /* There's nothing to do if we find a real call.  */
	      if ((prev->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
		{
		  DEBUG_FTRACE ("will fix up link in next iteration");
		  break;
		}
	    }
	}
    }
}
866
/* Connect function segments on the same level in the back trace at LHS and RHS.
   The back traces at LHS and RHS are expected to match according to
   ftrace_match_backtrace.  BTINFO is the branch trace information for the
   current thread.  */

static void
ftrace_connect_backtrace (struct btrace_thread_info *btinfo,
			  struct btrace_function *lhs,
			  struct btrace_function *rhs)
{
  while (lhs != NULL && rhs != NULL)
    {
      struct btrace_function *prev, *next;

      gdb_assert (!ftrace_function_switched (lhs, rhs->msym, rhs->sym));

      /* Connecting LHS and RHS may change the up link.  */
      prev = lhs;
      next = rhs;

      /* Advance to the callers before connecting; ftrace_connect_bfun
	 below may rewrite the up links of PREV and NEXT.  */
      lhs = ftrace_get_caller (btinfo, lhs);
      rhs = ftrace_get_caller (btinfo, rhs);

      ftrace_connect_bfun (btinfo, prev, next);
    }
}
893
/* Bridge the gap between two function segments left and right of a gap if their
   respective back traces match in at least MIN_MATCHES functions.  BTINFO is
   the branch trace information for the current thread.

   Returns non-zero if the gap could be bridged, zero otherwise.  */

static int
ftrace_bridge_gap (struct btrace_thread_info *btinfo,
		   struct btrace_function *lhs, struct btrace_function *rhs,
		   int min_matches)
{
  struct btrace_function *best_l, *best_r, *cand_l, *cand_r;
  int best_matches;

  DEBUG_FTRACE ("checking gap at insn %u (req matches: %d)",
		rhs->insn_offset - 1, min_matches);

  best_matches = 0;
  best_l = NULL;
  best_r = NULL;

  /* We search the back traces of LHS and RHS for valid connections and connect
     the two function segments that give the longest combined back trace.  */

  for (cand_l = lhs; cand_l != NULL;
       cand_l = ftrace_get_caller (btinfo, cand_l))
    for (cand_r = rhs; cand_r != NULL;
	 cand_r = ftrace_get_caller (btinfo, cand_r))
      {
	int matches;

	matches = ftrace_match_backtrace (btinfo, cand_l, cand_r);
	if (best_matches < matches)
	  {
	    best_matches = matches;
	    best_l = cand_l;
	    best_r = cand_r;
	  }
      }

  /* We need at least MIN_MATCHES matches.  */
  gdb_assert (min_matches > 0);
  if (best_matches < min_matches)
    return 0;

  DEBUG_FTRACE ("..matches: %d", best_matches);

  /* We will fix up the level of BEST_R and succeeding function segments such
     that BEST_R's level matches BEST_L's when we connect BEST_L to BEST_R.

     This will ignore the level of RHS and following if BEST_R != RHS.  I.e. if
     BEST_R is a successor of RHS in the back trace of RHS (phases 1 and 3).

     To catch this, we already fix up the level here where we can start at RHS
     instead of at BEST_R.  We will ignore the level fixup when connecting
     BEST_L to BEST_R as they will already be on the same level.  */
  ftrace_fixup_level (btinfo, rhs, best_l->level - best_r->level);

  ftrace_connect_backtrace (btinfo, best_l, best_r);

  return best_matches;
}
956
957/* Try to bridge gaps due to overflow or decode errors by connecting the
958 function segments that are separated by the gap. */
959
960static void
8ffd39f2 961btrace_bridge_gaps (struct thread_info *tp, std::vector<unsigned int> &gaps)
d87fdac3 962{
4aeb0dfc 963 struct btrace_thread_info *btinfo = &tp->btrace;
8ffd39f2 964 std::vector<unsigned int> remaining;
d87fdac3
MM
965 int min_matches;
966
967 DEBUG ("bridge gaps");
968
d87fdac3
MM
969 /* We require a minimum amount of matches for bridging a gap. The number of
970 required matches will be lowered with each iteration.
971
972 The more matches the higher our confidence that the bridging is correct.
973 For big gaps or small traces, however, it may not be feasible to require a
974 high number of matches. */
975 for (min_matches = 5; min_matches > 0; --min_matches)
976 {
977 /* Let's try to bridge as many gaps as we can. In some cases, we need to
978 skip a gap and revisit it again after we closed later gaps. */
8ffd39f2 979 while (!gaps.empty ())
d87fdac3 980 {
8ffd39f2 981 for (const unsigned int number : gaps)
d87fdac3 982 {
8ffd39f2 983 struct btrace_function *gap, *lhs, *rhs;
d87fdac3
MM
984 int bridged;
985
8ffd39f2
TW
986 gap = ftrace_find_call_by_number (btinfo, number);
987
d87fdac3
MM
988 /* We may have a sequence of gaps if we run from one error into
989 the next as we try to re-sync onto the trace stream. Ignore
990 all but the leftmost gap in such a sequence.
991
992 Also ignore gaps at the beginning of the trace. */
eb8f2b9c 993 lhs = ftrace_find_call_by_number (btinfo, gap->number - 1);
d87fdac3
MM
994 if (lhs == NULL || lhs->errcode != 0)
995 continue;
996
997 /* Skip gaps to the right. */
eb8f2b9c
TW
998 rhs = ftrace_find_call_by_number (btinfo, gap->number + 1);
999 while (rhs != NULL && rhs->errcode != 0)
1000 rhs = ftrace_find_call_by_number (btinfo, rhs->number + 1);
d87fdac3
MM
1001
1002 /* Ignore gaps at the end of the trace. */
1003 if (rhs == NULL)
1004 continue;
1005
eb8f2b9c 1006 bridged = ftrace_bridge_gap (btinfo, lhs, rhs, min_matches);
d87fdac3
MM
1007
1008 /* Keep track of gaps we were not able to bridge and try again.
1009 If we just pushed them to the end of GAPS we would risk an
1010 infinite loop in case we simply cannot bridge a gap. */
1011 if (bridged == 0)
8ffd39f2 1012 remaining.push_back (number);
d87fdac3
MM
1013 }
1014
1015 /* Let's see if we made any progress. */
8ffd39f2 1016 if (remaining.size () == gaps.size ())
d87fdac3
MM
1017 break;
1018
8ffd39f2
TW
1019 gaps.clear ();
1020 gaps.swap (remaining);
d87fdac3
MM
1021 }
1022
1023 /* We get here if either GAPS is empty or if GAPS equals REMAINING. */
8ffd39f2 1024 if (gaps.empty ())
d87fdac3
MM
1025 break;
1026
8ffd39f2 1027 remaining.clear ();
d87fdac3
MM
1028 }
1029
d87fdac3
MM
1030 /* We may omit this in some cases. Not sure it is worth the extra
1031 complication, though. */
eb8f2b9c 1032 ftrace_compute_global_level_offset (btinfo);
d87fdac3
MM
1033}
1034
734b0e4b 1035/* Compute the function branch trace from BTS trace. */
23a7fe75
MM
1036
1037static void
76235df1 1038btrace_compute_ftrace_bts (struct thread_info *tp,
d87fdac3 1039 const struct btrace_data_bts *btrace,
8ffd39f2 1040 std::vector<unsigned int> &gaps)
23a7fe75 1041{
76235df1 1042 struct btrace_thread_info *btinfo;
23a7fe75 1043 struct gdbarch *gdbarch;
d87fdac3 1044 unsigned int blk;
23a7fe75
MM
1045 int level;
1046
23a7fe75 1047 gdbarch = target_gdbarch ();
76235df1 1048 btinfo = &tp->btrace;
734b0e4b 1049 blk = VEC_length (btrace_block_s, btrace->blocks);
23a7fe75 1050
b54b03bd
TW
1051 if (btinfo->functions.empty ())
1052 level = INT_MAX;
1053 else
1054 level = -btinfo->level;
1055
23a7fe75
MM
1056 while (blk != 0)
1057 {
1058 btrace_block_s *block;
1059 CORE_ADDR pc;
1060
1061 blk -= 1;
1062
734b0e4b 1063 block = VEC_index (btrace_block_s, btrace->blocks, blk);
23a7fe75
MM
1064 pc = block->begin;
1065
1066 for (;;)
1067 {
b54b03bd 1068 struct btrace_function *bfun;
7d5c24b3 1069 struct btrace_insn insn;
23a7fe75
MM
1070 int size;
1071
1072 /* We should hit the end of the block. Warn if we went too far. */
1073 if (block->end < pc)
1074 {
b61ce85c 1075 /* Indicate the gap in the trace. */
8ffd39f2 1076 bfun = ftrace_new_gap (btinfo, BDE_BTS_OVERFLOW, gaps);
b61ce85c
MM
1077
1078 warning (_("Recorded trace may be corrupted at instruction "
b54b03bd 1079 "%u (pc = %s)."), bfun->insn_offset - 1,
b61ce85c 1080 core_addr_to_string_nz (pc));
63ab433e 1081
23a7fe75
MM
1082 break;
1083 }
1084
b54b03bd 1085 bfun = ftrace_update_function (btinfo, pc);
23a7fe75 1086
8710b709
MM
1087 /* Maintain the function level offset.
1088 For all but the last block, we do it here. */
1089 if (blk != 0)
b54b03bd 1090 level = std::min (level, bfun->level);
23a7fe75 1091
7d5c24b3 1092 size = 0;
492d29ea
PA
1093 TRY
1094 {
1095 size = gdb_insn_length (gdbarch, pc);
1096 }
1097 CATCH (error, RETURN_MASK_ERROR)
1098 {
1099 }
1100 END_CATCH
7d5c24b3
MM
1101
1102 insn.pc = pc;
1103 insn.size = size;
1104 insn.iclass = ftrace_classify_insn (gdbarch, pc);
da8c46d2 1105 insn.flags = 0;
7d5c24b3 1106
b54b03bd 1107 ftrace_update_insns (bfun, &insn);
23a7fe75
MM
1108
1109 /* We're done once we pushed the instruction at the end. */
1110 if (block->end == pc)
1111 break;
1112
7d5c24b3 1113 /* We can't continue if we fail to compute the size. */
23a7fe75
MM
1114 if (size <= 0)
1115 {
31fd9caa
MM
1116 /* Indicate the gap in the trace. We just added INSN so we're
1117 not at the beginning. */
8ffd39f2 1118 bfun = ftrace_new_gap (btinfo, BDE_BTS_INSN_SIZE, gaps);
31fd9caa 1119
63ab433e 1120 warning (_("Recorded trace may be incomplete at instruction %u "
b54b03bd 1121 "(pc = %s)."), bfun->insn_offset - 1,
63ab433e
MM
1122 core_addr_to_string_nz (pc));
1123
23a7fe75
MM
1124 break;
1125 }
1126
1127 pc += size;
8710b709
MM
1128
1129 /* Maintain the function level offset.
1130 For the last block, we do it here to not consider the last
1131 instruction.
1132 Since the last instruction corresponds to the current instruction
1133 and is not really part of the execution history, it shouldn't
1134 affect the level. */
1135 if (blk == 0)
b54b03bd 1136 level = std::min (level, bfun->level);
23a7fe75 1137 }
02d27625
MM
1138 }
1139
23a7fe75
MM
1140 /* LEVEL is the minimal function level of all btrace function segments.
1141 Define the global level offset to -LEVEL so all function levels are
1142 normalized to start at zero. */
1143 btinfo->level = -level;
02d27625
MM
1144}
1145
b20a6524
MM
1146#if defined (HAVE_LIBIPT)
1147
1148static enum btrace_insn_class
1149pt_reclassify_insn (enum pt_insn_class iclass)
1150{
1151 switch (iclass)
1152 {
1153 case ptic_call:
1154 return BTRACE_INSN_CALL;
1155
1156 case ptic_return:
1157 return BTRACE_INSN_RETURN;
1158
1159 case ptic_jump:
1160 return BTRACE_INSN_JUMP;
1161
1162 default:
1163 return BTRACE_INSN_OTHER;
1164 }
1165}
1166
da8c46d2
MM
1167/* Return the btrace instruction flags for INSN. */
1168
d7abe101 1169static btrace_insn_flags
b5c36682 1170pt_btrace_insn_flags (const struct pt_insn &insn)
da8c46d2 1171{
d7abe101 1172 btrace_insn_flags flags = 0;
da8c46d2 1173
b5c36682 1174 if (insn.speculative)
da8c46d2
MM
1175 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
1176
1177 return flags;
1178}
1179
b5c36682
PA
1180/* Return the btrace instruction for INSN. */
1181
1182static btrace_insn
1183pt_btrace_insn (const struct pt_insn &insn)
1184{
1185 return {(CORE_ADDR) insn.ip, (gdb_byte) insn.size,
1186 pt_reclassify_insn (insn.iclass),
1187 pt_btrace_insn_flags (insn)};
1188}
1189
13ace077
MM
/* Handle instruction decode events (libipt-v2).  Drains all pending events
   from DECODER while STATUS has pts_event_pending set, recording trace gaps
   in GAPS as needed.  Returns the last decoder status.  */

static int
handle_pt_insn_events (struct btrace_thread_info *btinfo,
		       struct pt_insn_decoder *decoder,
		       std::vector<unsigned int> &gaps, int status)
{
#if defined (HAVE_PT_INSN_EVENT)
  while (status & pts_event_pending)
    {
      struct btrace_function *bfun;
      struct pt_event event;
      uint64_t offset;

      status = pt_insn_event (decoder, &event, sizeof (event));
      if (status < 0)
	break;

      switch (event.type)
	{
	default:
	  break;

	case ptev_enabled:
	  /* A non-resumed enable means tracing restarted somewhere else;
	     record a gap unless this is the very start of the trace.  */
	  if (event.variant.enabled.resumed == 0 && !btinfo->functions.empty ())
	    {
	      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

	      pt_insn_get_offset (decoder, &offset);

	      warning (_("Non-contiguous trace at instruction %u (offset = 0x%"
			 PRIx64 ")."), bfun->insn_offset - 1, offset);
	    }

	  break;

	case ptev_overflow:
	  bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

	  pt_insn_get_offset (decoder, &offset);

	  warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ")."),
		   bfun->insn_offset - 1, offset);

	  break;
	}
    }
#endif /* defined (HAVE_PT_INSN_EVENT) */

  return status;
}
1241
/* Handle events indicated by flags in INSN (libipt-v1).  Records trace gaps
   in GAPS for re-enabled tracing and for overflows.  */

static void
handle_pt_insn_event_flags (struct btrace_thread_info *btinfo,
			    struct pt_insn_decoder *decoder,
			    const struct pt_insn &insn,
			    std::vector<unsigned int> &gaps)
{
#if defined (HAVE_STRUCT_PT_INSN_ENABLED)
  /* Tracing is disabled and re-enabled each time we enter the kernel.  Most
     times, we continue from the same instruction we stopped before.  This is
     indicated via the RESUMED instruction flag.  The ENABLED instruction flag
     means that we continued from some other instruction.  Indicate this as a
     trace gap except when tracing just started.  */
  if (insn.enabled && !btinfo->functions.empty ())
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_DISABLED, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Non-contiguous trace at instruction %u (offset = 0x%" PRIx64
		 ", pc = 0x%" PRIx64 ")."), bfun->insn_offset - 1, offset,
	       insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_ENABLED) */

#if defined (HAVE_STRUCT_PT_INSN_RESYNCED)
  /* Indicate trace overflows.  */
  if (insn.resynced)
    {
      struct btrace_function *bfun;
      uint64_t offset;

      bfun = ftrace_new_gap (btinfo, BDE_PT_OVERFLOW, gaps);

      pt_insn_get_offset (decoder, &offset);

      warning (_("Overflow at instruction %u (offset = 0x%" PRIx64 ", pc = 0x%"
		 PRIx64 ")."), bfun->insn_offset - 1, offset, insn.ip);
    }
#endif /* defined (HAVE_STRUCT_PT_INSN_RESYNCED) */
}
b5c36682 1287
17b89b34 1288/* Add function branch trace to BTINFO using DECODER. */
b20a6524
MM
1289
1290static void
17b89b34
TW
1291ftrace_add_pt (struct btrace_thread_info *btinfo,
1292 struct pt_insn_decoder *decoder,
b54b03bd 1293 int *plevel,
8ffd39f2 1294 std::vector<unsigned int> &gaps)
b20a6524 1295{
b54b03bd 1296 struct btrace_function *bfun;
b20a6524 1297 uint64_t offset;
13ace077 1298 int status;
b20a6524 1299
b20a6524
MM
1300 for (;;)
1301 {
b20a6524
MM
1302 struct pt_insn insn;
1303
13ace077
MM
1304 status = pt_insn_sync_forward (decoder);
1305 if (status < 0)
b20a6524 1306 {
13ace077 1307 if (status != -pte_eos)
bc504a31 1308 warning (_("Failed to synchronize onto the Intel Processor "
13ace077 1309 "Trace stream: %s."), pt_errstr (pt_errcode (status)));
b20a6524
MM
1310 break;
1311 }
1312
b20a6524
MM
1313 for (;;)
1314 {
13ace077
MM
1315 /* Handle events from the previous iteration or synchronization. */
1316 status = handle_pt_insn_events (btinfo, decoder, gaps, status);
1317 if (status < 0)
b20a6524
MM
1318 break;
1319
13ace077
MM
1320 status = pt_insn_next (decoder, &insn, sizeof(insn));
1321 if (status < 0)
1322 break;
b61ce85c 1323
13ace077
MM
1324 /* Handle events indicated by flags in INSN. */
1325 handle_pt_insn_event_flags (btinfo, decoder, insn, gaps);
b20a6524 1326
b54b03bd 1327 bfun = ftrace_update_function (btinfo, insn.ip);
b20a6524
MM
1328
1329 /* Maintain the function level offset. */
b54b03bd 1330 *plevel = std::min (*plevel, bfun->level);
b20a6524 1331
b5c36682 1332 btrace_insn btinsn = pt_btrace_insn (insn);
b54b03bd 1333 ftrace_update_insns (bfun, &btinsn);
b20a6524
MM
1334 }
1335
13ace077 1336 if (status == -pte_eos)
b20a6524
MM
1337 break;
1338
b20a6524 1339 /* Indicate the gap in the trace. */
13ace077 1340 bfun = ftrace_new_gap (btinfo, status, gaps);
b20a6524 1341
63ab433e
MM
1342 pt_insn_get_offset (decoder, &offset);
1343
1344 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
13ace077
MM
1345 ", pc = 0x%" PRIx64 "): %s."), status, bfun->insn_offset - 1,
1346 offset, insn.ip, pt_errstr (pt_errcode (status)));
63ab433e 1347 }
b20a6524
MM
1348}
1349
1350/* A callback function to allow the trace decoder to read the inferior's
1351 memory. */
1352
1353static int
1354btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
80a2b330 1355 const struct pt_asid *asid, uint64_t pc,
b20a6524
MM
1356 void *context)
1357{
43368e1d 1358 int result, errcode;
b20a6524 1359
43368e1d 1360 result = (int) size;
b20a6524
MM
1361 TRY
1362 {
80a2b330 1363 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
b20a6524 1364 if (errcode != 0)
43368e1d 1365 result = -pte_nomap;
b20a6524
MM
1366 }
1367 CATCH (error, RETURN_MASK_ERROR)
1368 {
43368e1d 1369 result = -pte_nomap;
b20a6524
MM
1370 }
1371 END_CATCH
1372
43368e1d 1373 return result;
b20a6524
MM
1374}
1375
1376/* Translate the vendor from one enum to another. */
1377
1378static enum pt_cpu_vendor
1379pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
1380{
1381 switch (vendor)
1382 {
1383 default:
1384 return pcv_unknown;
1385
1386 case CV_INTEL:
1387 return pcv_intel;
1388 }
1389}
1390
1391/* Finalize the function branch trace after decode. */
1392
1393static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
1394 struct thread_info *tp, int level)
1395{
1396 pt_insn_free_decoder (decoder);
1397
1398 /* LEVEL is the minimal function level of all btrace function segments.
1399 Define the global level offset to -LEVEL so all function levels are
1400 normalized to start at zero. */
1401 tp->btrace.level = -level;
1402
1403 /* Add a single last instruction entry for the current PC.
1404 This allows us to compute the backtrace at the current PC using both
1405 standard unwind and btrace unwind.
1406 This extra entry is ignored by all record commands. */
1407 btrace_add_pc (tp);
1408}
1409
bc504a31
PA
1410/* Compute the function branch trace from Intel Processor Trace
1411 format. */
b20a6524
MM
1412
1413static void
1414btrace_compute_ftrace_pt (struct thread_info *tp,
d87fdac3 1415 const struct btrace_data_pt *btrace,
8ffd39f2 1416 std::vector<unsigned int> &gaps)
b20a6524
MM
1417{
1418 struct btrace_thread_info *btinfo;
1419 struct pt_insn_decoder *decoder;
1420 struct pt_config config;
1421 int level, errcode;
1422
1423 if (btrace->size == 0)
1424 return;
1425
1426 btinfo = &tp->btrace;
b54b03bd
TW
1427 if (btinfo->functions.empty ())
1428 level = INT_MAX;
1429 else
1430 level = -btinfo->level;
b20a6524
MM
1431
1432 pt_config_init(&config);
1433 config.begin = btrace->data;
1434 config.end = btrace->data + btrace->size;
1435
1436 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
1437 config.cpu.family = btrace->config.cpu.family;
1438 config.cpu.model = btrace->config.cpu.model;
1439 config.cpu.stepping = btrace->config.cpu.stepping;
1440
1441 errcode = pt_cpu_errata (&config.errata, &config.cpu);
1442 if (errcode < 0)
bc504a31 1443 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b20a6524
MM
1444 pt_errstr (pt_errcode (errcode)));
1445
1446 decoder = pt_insn_alloc_decoder (&config);
1447 if (decoder == NULL)
bc504a31 1448 error (_("Failed to allocate the Intel Processor Trace decoder."));
b20a6524
MM
1449
1450 TRY
1451 {
1452 struct pt_image *image;
1453
1454 image = pt_insn_get_image(decoder);
1455 if (image == NULL)
bc504a31 1456 error (_("Failed to configure the Intel Processor Trace decoder."));
b20a6524
MM
1457
1458 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
1459 if (errcode < 0)
bc504a31 1460 error (_("Failed to configure the Intel Processor Trace decoder: "
b20a6524
MM
1461 "%s."), pt_errstr (pt_errcode (errcode)));
1462
b54b03bd 1463 ftrace_add_pt (btinfo, decoder, &level, gaps);
b20a6524
MM
1464 }
1465 CATCH (error, RETURN_MASK_ALL)
1466 {
1467 /* Indicate a gap in the trace if we quit trace processing. */
b54b03bd 1468 if (error.reason == RETURN_QUIT && !btinfo->functions.empty ())
8ffd39f2 1469 ftrace_new_gap (btinfo, BDE_PT_USER_QUIT, gaps);
b20a6524
MM
1470
1471 btrace_finalize_ftrace_pt (decoder, tp, level);
1472
1473 throw_exception (error);
1474 }
1475 END_CATCH
1476
1477 btrace_finalize_ftrace_pt (decoder, tp, level);
1478}
1479
1480#else /* defined (HAVE_LIBIPT) */
1481
1482static void
1483btrace_compute_ftrace_pt (struct thread_info *tp,
d87fdac3 1484 const struct btrace_data_pt *btrace,
8ffd39f2 1485 std::vector<unsigned int> &gaps)
b20a6524
MM
1486{
1487 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
1488}
1489
1490#endif /* defined (HAVE_LIBIPT) */
1491
734b0e4b
MM
1492/* Compute the function branch trace from a block branch trace BTRACE for
1493 a thread given by BTINFO. */
1494
1495static void
d87fdac3 1496btrace_compute_ftrace_1 (struct thread_info *tp, struct btrace_data *btrace,
8ffd39f2 1497 std::vector<unsigned int> &gaps)
734b0e4b
MM
1498{
1499 DEBUG ("compute ftrace");
1500
1501 switch (btrace->format)
1502 {
1503 case BTRACE_FORMAT_NONE:
1504 return;
1505
1506 case BTRACE_FORMAT_BTS:
d87fdac3 1507 btrace_compute_ftrace_bts (tp, &btrace->variant.bts, gaps);
734b0e4b 1508 return;
b20a6524
MM
1509
1510 case BTRACE_FORMAT_PT:
d87fdac3 1511 btrace_compute_ftrace_pt (tp, &btrace->variant.pt, gaps);
b20a6524 1512 return;
734b0e4b
MM
1513 }
1514
1515 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1516}
1517
d87fdac3 1518static void
8ffd39f2 1519btrace_finalize_ftrace (struct thread_info *tp, std::vector<unsigned int> &gaps)
d87fdac3 1520{
8ffd39f2 1521 if (!gaps.empty ())
d87fdac3 1522 {
8ffd39f2 1523 tp->btrace.ngaps += gaps.size ();
d87fdac3
MM
1524 btrace_bridge_gaps (tp, gaps);
1525 }
1526}
1527
1528static void
1529btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
1530{
8ffd39f2 1531 std::vector<unsigned int> gaps;
d87fdac3
MM
1532
1533 TRY
1534 {
8ffd39f2 1535 btrace_compute_ftrace_1 (tp, btrace, gaps);
d87fdac3
MM
1536 }
1537 CATCH (error, RETURN_MASK_ALL)
1538 {
8ffd39f2 1539 btrace_finalize_ftrace (tp, gaps);
d87fdac3
MM
1540
1541 throw_exception (error);
1542 }
1543 END_CATCH
1544
8ffd39f2 1545 btrace_finalize_ftrace (tp, gaps);
d87fdac3
MM
1546}
1547
6e07b1d2
MM
1548/* Add an entry for the current PC. */
1549
1550static void
1551btrace_add_pc (struct thread_info *tp)
1552{
734b0e4b 1553 struct btrace_data btrace;
6e07b1d2
MM
1554 struct btrace_block *block;
1555 struct regcache *regcache;
1556 struct cleanup *cleanup;
1557 CORE_ADDR pc;
1558
1559 regcache = get_thread_regcache (tp->ptid);
1560 pc = regcache_read_pc (regcache);
1561
734b0e4b
MM
1562 btrace_data_init (&btrace);
1563 btrace.format = BTRACE_FORMAT_BTS;
1564 btrace.variant.bts.blocks = NULL;
6e07b1d2 1565
734b0e4b
MM
1566 cleanup = make_cleanup_btrace_data (&btrace);
1567
1568 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
6e07b1d2
MM
1569 block->begin = pc;
1570 block->end = pc;
1571
76235df1 1572 btrace_compute_ftrace (tp, &btrace);
6e07b1d2
MM
1573
1574 do_cleanups (cleanup);
1575}
1576
02d27625
MM
1577/* See btrace.h. */
1578
1579void
f4abbc16 1580btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
02d27625
MM
1581{
1582 if (tp->btrace.target != NULL)
1583 return;
1584
46a3515b
MM
1585#if !defined (HAVE_LIBIPT)
1586 if (conf->format == BTRACE_FORMAT_PT)
bc504a31 1587 error (_("GDB does not support Intel Processor Trace."));
46a3515b
MM
1588#endif /* !defined (HAVE_LIBIPT) */
1589
f4abbc16 1590 if (!target_supports_btrace (conf->format))
02d27625
MM
1591 error (_("Target does not support branch tracing."));
1592
43792cf0
PA
1593 DEBUG ("enable thread %s (%s)", print_thread_id (tp),
1594 target_pid_to_str (tp->ptid));
02d27625 1595
f4abbc16 1596 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
6e07b1d2 1597
cd4007e4
MM
1598 /* We're done if we failed to enable tracing. */
1599 if (tp->btrace.target == NULL)
1600 return;
1601
1602 /* We need to undo the enable in case of errors. */
1603 TRY
1604 {
1605 /* Add an entry for the current PC so we start tracing from where we
1606 enabled it.
1607
1608 If we can't access TP's registers, TP is most likely running. In this
1609 case, we can't really say where tracing was enabled so it should be
1610 safe to simply skip this step.
1611
1612 This is not relevant for BTRACE_FORMAT_PT since the trace will already
1613 start at the PC at which tracing was enabled. */
1614 if (conf->format != BTRACE_FORMAT_PT
1615 && can_access_registers_ptid (tp->ptid))
1616 btrace_add_pc (tp);
1617 }
1618 CATCH (exception, RETURN_MASK_ALL)
1619 {
1620 btrace_disable (tp);
1621
1622 throw_exception (exception);
1623 }
1624 END_CATCH
02d27625
MM
1625}
1626
1627/* See btrace.h. */
1628
f4abbc16
MM
1629const struct btrace_config *
1630btrace_conf (const struct btrace_thread_info *btinfo)
1631{
1632 if (btinfo->target == NULL)
1633 return NULL;
1634
1635 return target_btrace_conf (btinfo->target);
1636}
1637
1638/* See btrace.h. */
1639
02d27625
MM
1640void
1641btrace_disable (struct thread_info *tp)
1642{
1643 struct btrace_thread_info *btp = &tp->btrace;
1644 int errcode = 0;
1645
1646 if (btp->target == NULL)
1647 return;
1648
43792cf0
PA
1649 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1650 target_pid_to_str (tp->ptid));
02d27625
MM
1651
1652 target_disable_btrace (btp->target);
1653 btp->target = NULL;
1654
1655 btrace_clear (tp);
1656}
1657
1658/* See btrace.h. */
1659
1660void
1661btrace_teardown (struct thread_info *tp)
1662{
1663 struct btrace_thread_info *btp = &tp->btrace;
1664 int errcode = 0;
1665
1666 if (btp->target == NULL)
1667 return;
1668
43792cf0
PA
1669 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1670 target_pid_to_str (tp->ptid));
02d27625
MM
1671
1672 target_teardown_btrace (btp->target);
1673 btp->target = NULL;
1674
1675 btrace_clear (tp);
1676}
1677
734b0e4b 1678/* Stitch branch trace in BTS format. */
969c39fb
MM
1679
1680static int
31fd9caa 1681btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
969c39fb 1682{
31fd9caa 1683 struct btrace_thread_info *btinfo;
969c39fb
MM
1684 struct btrace_function *last_bfun;
1685 struct btrace_insn *last_insn;
1686 btrace_block_s *first_new_block;
1687
31fd9caa 1688 btinfo = &tp->btrace;
b54b03bd 1689 gdb_assert (!btinfo->functions.empty ());
31fd9caa
MM
1690 gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));
1691
08c3f6d2 1692 last_bfun = &btinfo->functions.back ();
b54b03bd 1693
31fd9caa
MM
1694 /* If the existing trace ends with a gap, we just glue the traces
1695 together. We need to drop the last (i.e. chronologically first) block
1696 of the new trace, though, since we can't fill in the start address.*/
1697 if (VEC_empty (btrace_insn_s, last_bfun->insn))
1698 {
1699 VEC_pop (btrace_block_s, btrace->blocks);
1700 return 0;
1701 }
969c39fb
MM
1702
1703 /* Beware that block trace starts with the most recent block, so the
1704 chronologically first block in the new trace is the last block in
1705 the new trace's block vector. */
734b0e4b 1706 first_new_block = VEC_last (btrace_block_s, btrace->blocks);
969c39fb
MM
1707 last_insn = VEC_last (btrace_insn_s, last_bfun->insn);
1708
1709 /* If the current PC at the end of the block is the same as in our current
1710 trace, there are two explanations:
1711 1. we executed the instruction and some branch brought us back.
1712 2. we have not made any progress.
1713 In the first case, the delta trace vector should contain at least two
1714 entries.
1715 In the second case, the delta trace vector should contain exactly one
1716 entry for the partial block containing the current PC. Remove it. */
1717 if (first_new_block->end == last_insn->pc
734b0e4b 1718 && VEC_length (btrace_block_s, btrace->blocks) == 1)
969c39fb 1719 {
734b0e4b 1720 VEC_pop (btrace_block_s, btrace->blocks);
969c39fb
MM
1721 return 0;
1722 }
1723
1724 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
1725 core_addr_to_string_nz (first_new_block->end));
1726
1727 /* Do a simple sanity check to make sure we don't accidentally end up
1728 with a bad block. This should not occur in practice. */
1729 if (first_new_block->end < last_insn->pc)
1730 {
1731 warning (_("Error while trying to read delta trace. Falling back to "
1732 "a full read."));
1733 return -1;
1734 }
1735
1736 /* We adjust the last block to start at the end of our current trace. */
1737 gdb_assert (first_new_block->begin == 0);
1738 first_new_block->begin = last_insn->pc;
1739
1740 /* We simply pop the last insn so we can insert it again as part of
1741 the normal branch trace computation.
1742 Since instruction iterators are based on indices in the instructions
1743 vector, we don't leave any pointers dangling. */
1744 DEBUG ("pruning insn at %s for stitching",
1745 ftrace_print_insn_addr (last_insn));
1746
1747 VEC_pop (btrace_insn_s, last_bfun->insn);
1748
1749 /* The instructions vector may become empty temporarily if this has
1750 been the only instruction in this function segment.
1751 This violates the invariant but will be remedied shortly by
1752 btrace_compute_ftrace when we add the new trace. */
31fd9caa
MM
1753
1754 /* The only case where this would hurt is if the entire trace consisted
1755 of just that one instruction. If we remove it, we might turn the now
1756 empty btrace function segment into a gap. But we don't want gaps at
1757 the beginning. To avoid this, we remove the entire old trace. */
b54b03bd 1758 if (last_bfun->number == 1 && VEC_empty (btrace_insn_s, last_bfun->insn))
31fd9caa
MM
1759 btrace_clear (tp);
1760
969c39fb
MM
1761 return 0;
1762}
1763
734b0e4b
MM
1764/* Adjust the block trace in order to stitch old and new trace together.
1765 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
1766 TP is the traced thread.
1767 May modifx BTRACE as well as the existing trace in TP.
734b0e4b
MM
1768 Return 0 on success, -1 otherwise. */
1769
1770static int
31fd9caa 1771btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1772{
1773 /* If we don't have trace, there's nothing to do. */
1774 if (btrace_data_empty (btrace))
1775 return 0;
1776
1777 switch (btrace->format)
1778 {
1779 case BTRACE_FORMAT_NONE:
1780 return 0;
1781
1782 case BTRACE_FORMAT_BTS:
31fd9caa 1783 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1784
1785 case BTRACE_FORMAT_PT:
1786 /* Delta reads are not supported. */
1787 return -1;
734b0e4b
MM
1788 }
1789
1790 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1791}
1792
969c39fb
MM
1793/* Clear the branch trace histories in BTINFO. */
1794
1795static void
1796btrace_clear_history (struct btrace_thread_info *btinfo)
1797{
1798 xfree (btinfo->insn_history);
1799 xfree (btinfo->call_history);
1800 xfree (btinfo->replay);
1801
1802 btinfo->insn_history = NULL;
1803 btinfo->call_history = NULL;
1804 btinfo->replay = NULL;
1805}
1806
b0627500
MM
1807/* Clear the branch trace maintenance histories in BTINFO. */
1808
1809static void
1810btrace_maint_clear (struct btrace_thread_info *btinfo)
1811{
1812 switch (btinfo->data.format)
1813 {
1814 default:
1815 break;
1816
1817 case BTRACE_FORMAT_BTS:
1818 btinfo->maint.variant.bts.packet_history.begin = 0;
1819 btinfo->maint.variant.bts.packet_history.end = 0;
1820 break;
1821
1822#if defined (HAVE_LIBIPT)
1823 case BTRACE_FORMAT_PT:
1824 xfree (btinfo->maint.variant.pt.packets);
1825
1826 btinfo->maint.variant.pt.packets = NULL;
1827 btinfo->maint.variant.pt.packet_history.begin = 0;
1828 btinfo->maint.variant.pt.packet_history.end = 0;
1829 break;
1830#endif /* defined (HAVE_LIBIPT) */
1831 }
1832}
1833
02d27625
MM
1834/* See btrace.h. */
1835
508352a9
TW
1836const char *
1837btrace_decode_error (enum btrace_format format, int errcode)
1838{
1839 switch (format)
1840 {
1841 case BTRACE_FORMAT_BTS:
1842 switch (errcode)
1843 {
1844 case BDE_BTS_OVERFLOW:
1845 return _("instruction overflow");
1846
1847 case BDE_BTS_INSN_SIZE:
1848 return _("unknown instruction");
1849
1850 default:
1851 break;
1852 }
1853 break;
1854
1855#if defined (HAVE_LIBIPT)
1856 case BTRACE_FORMAT_PT:
1857 switch (errcode)
1858 {
1859 case BDE_PT_USER_QUIT:
1860 return _("trace decode cancelled");
1861
1862 case BDE_PT_DISABLED:
1863 return _("disabled");
1864
1865 case BDE_PT_OVERFLOW:
1866 return _("overflow");
1867
1868 default:
1869 if (errcode < 0)
1870 return pt_errstr (pt_errcode (errcode));
1871 break;
1872 }
1873 break;
1874#endif /* defined (HAVE_LIBIPT) */
1875
1876 default:
1877 break;
1878 }
1879
1880 return _("unknown");
1881}
1882
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  /* No target means recording is not enabled for this thread.  */
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* With CLI usage, TP->PTID always equals INFERIOR_PTID here.  Now that we
     can store a gdb.Record object in Python referring to a different thread
     than the current one, temporarily set INFERIOR_PTID.  */
  scoped_restore save_inferior_ptid = make_scoped_restore (&inferior_ptid);
  inferior_ptid = tp->ptid;

  /* We should not be called on running or exited threads.  */
  gdb_assert (can_access_registers_ptid (tp->ptid));

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (!btinfo->functions.empty ())
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      /* The maint packet history points into BTINFO->DATA; it must be
	 invalidated whenever the data changes.  */
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  /* Releases BTRACE's local buffers; ownership of kept data was
     transferred by btrace_data_append above.  */
  do_cleanups (cleanup);
}
1967
/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;
  /* Free each function segment's instruction vector before dropping the
     segments themselves.  */
  for (auto &bfun : btinfo->functions)
    VEC_free (btrace_insn_s, bfun.insn);

  btinfo->functions.clear ();
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}
1994
1995/* See btrace.h. */
1996
1997void
1998btrace_free_objfile (struct objfile *objfile)
1999{
2000 struct thread_info *tp;
2001
2002 DEBUG ("free objfile");
2003
034f788c 2004 ALL_NON_EXITED_THREADS (tp)
02d27625
MM
2005 btrace_clear (tp);
2006}
c12a2917
MM
2007
2008#if defined (HAVE_LIBEXPAT)
2009
2010/* Check the btrace document version. */
2011
2012static void
2013check_xml_btrace_version (struct gdb_xml_parser *parser,
2014 const struct gdb_xml_element *element,
2015 void *user_data, VEC (gdb_xml_value_s) *attributes)
2016{
9a3c8263
SM
2017 const char *version
2018 = (const char *) xml_find_attribute (attributes, "version")->value;
c12a2917
MM
2019
2020 if (strcmp (version, "1.0") != 0)
2021 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
2022}
2023
2024/* Parse a btrace "block" xml record. */
2025
2026static void
2027parse_xml_btrace_block (struct gdb_xml_parser *parser,
2028 const struct gdb_xml_element *element,
2029 void *user_data, VEC (gdb_xml_value_s) *attributes)
2030{
734b0e4b 2031 struct btrace_data *btrace;
c12a2917
MM
2032 struct btrace_block *block;
2033 ULONGEST *begin, *end;
2034
9a3c8263 2035 btrace = (struct btrace_data *) user_data;
734b0e4b
MM
2036
2037 switch (btrace->format)
2038 {
2039 case BTRACE_FORMAT_BTS:
2040 break;
2041
2042 case BTRACE_FORMAT_NONE:
2043 btrace->format = BTRACE_FORMAT_BTS;
2044 btrace->variant.bts.blocks = NULL;
2045 break;
2046
2047 default:
2048 gdb_xml_error (parser, _("Btrace format error."));
2049 }
c12a2917 2050
bc84451b
SM
2051 begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
2052 end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;
c12a2917 2053
734b0e4b 2054 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
c12a2917
MM
2055 block->begin = *begin;
2056 block->end = *end;
2057}
2058
b20a6524
MM
/* Parse a "raw" xml record.  Decodes the hex-encoded BODY_TEXT into a
   freshly xmalloc'd buffer returned through *PDATA with its length in
   *PSIZE.  Ownership of the buffer transfers to the caller.  Raises a
   gdb_xml_error on malformed input.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  /* Two hex digits encode one byte; an odd length cannot be valid.  */
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  /* Register a cleanup so the buffer is freed if gdb_xml_error throws
     below.  */
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      /* A NUL inside the nibble pair means the string ended early.  */
      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  /* Success - the caller now owns DATA, so drop the cleanup without
     running it.  */
  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}
2098
2099/* Parse a btrace pt-config "cpu" xml record. */
2100
2101static void
2102parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
2103 const struct gdb_xml_element *element,
2104 void *user_data,
2105 VEC (gdb_xml_value_s) *attributes)
2106{
2107 struct btrace_data *btrace;
2108 const char *vendor;
2109 ULONGEST *family, *model, *stepping;
2110
9a3c8263
SM
2111 vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
2112 family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
2113 model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
2114 stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;
b20a6524 2115
9a3c8263 2116 btrace = (struct btrace_data *) user_data;
b20a6524
MM
2117
2118 if (strcmp (vendor, "GenuineIntel") == 0)
2119 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
2120
2121 btrace->variant.pt.config.cpu.family = *family;
2122 btrace->variant.pt.config.cpu.model = *model;
2123 btrace->variant.pt.config.cpu.stepping = *stepping;
2124}
2125
2126/* Parse a btrace pt "raw" xml record. */
2127
2128static void
2129parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
2130 const struct gdb_xml_element *element,
2131 void *user_data, const char *body_text)
2132{
2133 struct btrace_data *btrace;
2134
9a3c8263 2135 btrace = (struct btrace_data *) user_data;
b20a6524
MM
2136 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
2137 &btrace->variant.pt.size);
2138}
2139
2140/* Parse a btrace "pt" xml record. */
2141
2142static void
2143parse_xml_btrace_pt (struct gdb_xml_parser *parser,
2144 const struct gdb_xml_element *element,
2145 void *user_data, VEC (gdb_xml_value_s) *attributes)
2146{
2147 struct btrace_data *btrace;
2148
9a3c8263 2149 btrace = (struct btrace_data *) user_data;
b20a6524
MM
2150 btrace->format = BTRACE_FORMAT_PT;
2151 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
2152 btrace->variant.pt.data = NULL;
2153 btrace->variant.pt.size = 0;
2154}
2155
c12a2917
MM
/* Attributes of a btrace "block" record: the begin/end addresses of a
   contiguous run of instructions.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a pt-config "cpu" record identifying the tracing CPU.  */

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of a "pt-config" record: an optional "cpu" element.  */

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Children of a "pt" record: optional configuration and raw trace data.  */

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of "btrace": repeated BTS "block" records or a single "pt"
   record.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The document root; version is checked in check_xml_btrace_version.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
2201
2202#endif /* defined (HAVE_LIBEXPAT) */
2203
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* Ensure partially-built trace data is freed if parsing errors out.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
23a7fe75 2231
f4abbc16
MM
2232#if defined (HAVE_LIBEXPAT)
2233
2234/* Parse a btrace-conf "bts" xml record. */
2235
2236static void
2237parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
2238 const struct gdb_xml_element *element,
2239 void *user_data, VEC (gdb_xml_value_s) *attributes)
2240{
2241 struct btrace_config *conf;
d33501a5 2242 struct gdb_xml_value *size;
f4abbc16 2243
9a3c8263 2244 conf = (struct btrace_config *) user_data;
f4abbc16 2245 conf->format = BTRACE_FORMAT_BTS;
d33501a5
MM
2246 conf->bts.size = 0;
2247
2248 size = xml_find_attribute (attributes, "size");
2249 if (size != NULL)
b20a6524 2250 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
f4abbc16
MM
2251}
2252
b20a6524
MM
2253/* Parse a btrace-conf "pt" xml record. */
2254
2255static void
2256parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
2257 const struct gdb_xml_element *element,
2258 void *user_data, VEC (gdb_xml_value_s) *attributes)
2259{
2260 struct btrace_config *conf;
2261 struct gdb_xml_value *size;
2262
9a3c8263 2263 conf = (struct btrace_config *) user_data;
b20a6524
MM
2264 conf->format = BTRACE_FORMAT_PT;
2265 conf->pt.size = 0;
2266
2267 size = xml_find_attribute (attributes, "size");
2268 if (size != NULL)
2269 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
2270}
2271
/* Attributes of a btrace-conf "pt" record: an optional buffer size.  */

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a btrace-conf "bts" record: an optional buffer size.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of "btrace-conf": at most one of "bts" or "pt".  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace-conf" element.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* The btrace-conf document root.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
2300
2301#endif /* defined (HAVE_LIBEXPAT) */
2302
2303/* See btrace.h. */
2304
2305void
2306parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
2307{
2308 int errcode;
2309
2310#if defined (HAVE_LIBEXPAT)
2311
2312 errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
2313 btrace_conf_elements, xml, conf);
2314 if (errcode != 0)
2315 error (_("Error parsing branch trace configuration."));
2316
2317#else /* !defined (HAVE_LIBEXPAT) */
2318
2319 error (_("XML parsing is not supported."));
2320
2321#endif /* !defined (HAVE_LIBEXPAT) */
2322}
2323
23a7fe75
MM
2324/* See btrace.h. */
2325
2326const struct btrace_insn *
2327btrace_insn_get (const struct btrace_insn_iterator *it)
2328{
2329 const struct btrace_function *bfun;
2330 unsigned int index, end;
2331
a0f1b963 2332 index = it->insn_index;
08c3f6d2 2333 bfun = &it->btinfo->functions[it->call_index];
23a7fe75 2334
31fd9caa
MM
2335 /* Check if the iterator points to a gap in the trace. */
2336 if (bfun->errcode != 0)
2337 return NULL;
2338
23a7fe75
MM
2339 /* The index is within the bounds of this function's instruction vector. */
2340 end = VEC_length (btrace_insn_s, bfun->insn);
2341 gdb_assert (0 < end);
2342 gdb_assert (index < end);
2343
2344 return VEC_index (btrace_insn_s, bfun->insn, index);
2345}
2346
2347/* See btrace.h. */
2348
69090cee
TW
2349int
2350btrace_insn_get_error (const struct btrace_insn_iterator *it)
23a7fe75 2351{
08c3f6d2 2352 return it->btinfo->functions[it->call_index].errcode;
69090cee 2353}
31fd9caa 2354
69090cee 2355/* See btrace.h. */
31fd9caa 2356
69090cee
TW
2357unsigned int
2358btrace_insn_number (const struct btrace_insn_iterator *it)
2359{
08c3f6d2 2360 return it->btinfo->functions[it->call_index].insn_offset + it->insn_index;
23a7fe75
MM
2361}
2362
2363/* See btrace.h. */
2364
2365void
2366btrace_insn_begin (struct btrace_insn_iterator *it,
2367 const struct btrace_thread_info *btinfo)
2368{
b54b03bd 2369 if (btinfo->functions.empty ())
23a7fe75
MM
2370 error (_("No trace."));
2371
521103fd 2372 it->btinfo = btinfo;
a0f1b963
TW
2373 it->call_index = 0;
2374 it->insn_index = 0;
23a7fe75
MM
2375}
2376
2377/* See btrace.h. */
2378
2379void
2380btrace_insn_end (struct btrace_insn_iterator *it,
2381 const struct btrace_thread_info *btinfo)
2382{
2383 const struct btrace_function *bfun;
2384 unsigned int length;
2385
b54b03bd 2386 if (btinfo->functions.empty ())
23a7fe75
MM
2387 error (_("No trace."));
2388
08c3f6d2 2389 bfun = &btinfo->functions.back ();
23a7fe75
MM
2390 length = VEC_length (btrace_insn_s, bfun->insn);
2391
31fd9caa
MM
2392 /* The last function may either be a gap or it contains the current
2393 instruction, which is one past the end of the execution trace; ignore
2394 it. */
2395 if (length > 0)
2396 length -= 1;
2397
521103fd 2398 it->btinfo = btinfo;
a0f1b963
TW
2399 it->call_index = bfun->number - 1;
2400 it->insn_index = length;
23a7fe75
MM
2401}
2402
/* See btrace.h.  Advances IT by up to STRIDE instructions and returns the
   number of instructions actually stepped.  Gaps count as one
   instruction.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = ftrace_find_call_by_number (it->btinfo, bfun->number + 1);
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
2484
/* See btrace.h.  Moves IT backwards by up to STRIDE instructions and
   returns the number of instructions actually stepped.  Gaps count as one
   instruction.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = &it->btinfo->functions[it->call_index];
  steps = 0;
  index = it->insn_index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = ftrace_find_call_by_number (it->btinfo, bfun->number - 1);
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new
	     function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* An empty function segment represents a gap in the trace.  We
	     count it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->call_index = bfun->number - 1;
  it->insn_index = index;

  return steps;
}
2542
2543/* See btrace.h. */
2544
2545int
2546btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
2547 const struct btrace_insn_iterator *rhs)
2548{
a0f1b963 2549 gdb_assert (lhs->btinfo == rhs->btinfo);
23a7fe75 2550
a0f1b963
TW
2551 if (lhs->call_index != rhs->call_index)
2552 return lhs->call_index - rhs->call_index;
23a7fe75 2553
a0f1b963 2554 return lhs->insn_index - rhs->insn_index;
23a7fe75
MM
2555}
2556
/* See btrace.h.  Positions IT at global instruction NUMBER via a binary
   search over the function segments.  Returns non-zero on success, zero if
   NUMBER is outside the recorded range.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int upper, lower;

  if (btinfo->functions.empty ())
    return 0;

  /* Reject NUMBER below the first segment's range...  */
  lower = 0;
  bfun = &btinfo->functions[lower];
  if (number < bfun->insn_offset)
    return 0;

  /* ...or past the last segment's range.  */
  upper = btinfo->functions.size () - 1;
  bfun = &btinfo->functions[upper];
  if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
    return 0;

  /* We assume that there are no holes in the numbering.  */
  for (;;)
    {
      const unsigned int average = lower + (upper - lower) / 2;

      bfun = &btinfo->functions[average];

      if (number < bfun->insn_offset)
	{
	  upper = average - 1;
	  continue;
	}

      if (number >= bfun->insn_offset + ftrace_call_num_insn (bfun))
	{
	  lower = average + 1;
	  continue;
	}

      /* NUMBER lies within segment AVERAGE.  */
      break;
    }

  it->btinfo = btinfo;
  it->call_index = bfun->number - 1;
  it->insn_index = number - bfun->insn_offset;
  return 1;
}
2607
f158f208
TW
2608/* Returns true if the recording ends with a function segment that
2609 contains only a single (i.e. the current) instruction. */
2610
2611static bool
2612btrace_ends_with_single_insn (const struct btrace_thread_info *btinfo)
2613{
2614 const btrace_function *bfun;
2615
2616 if (btinfo->functions.empty ())
2617 return false;
2618
08c3f6d2 2619 bfun = &btinfo->functions.back ();
f158f208
TW
2620 if (bfun->errcode != 0)
2621 return false;
2622
2623 return ftrace_call_num_insn (bfun) == 1;
2624}
2625
23a7fe75
MM
2626/* See btrace.h. */
2627
2628const struct btrace_function *
2629btrace_call_get (const struct btrace_call_iterator *it)
2630{
f158f208
TW
2631 if (it->index >= it->btinfo->functions.size ())
2632 return NULL;
2633
08c3f6d2 2634 return &it->btinfo->functions[it->index];
23a7fe75
MM
2635}
2636
2637/* See btrace.h. */
2638
2639unsigned int
2640btrace_call_number (const struct btrace_call_iterator *it)
2641{
f158f208 2642 const unsigned int length = it->btinfo->functions.size ();
23a7fe75 2643
f158f208
TW
2644 /* If the last function segment contains only a single instruction (i.e. the
2645 current instruction), skip it. */
2646 if ((it->index == length) && btrace_ends_with_single_insn (it->btinfo))
2647 return length;
23a7fe75 2648
f158f208 2649 return it->index + 1;
23a7fe75
MM
2650}
2651
2652/* See btrace.h. */
2653
2654void
2655btrace_call_begin (struct btrace_call_iterator *it,
2656 const struct btrace_thread_info *btinfo)
2657{
f158f208 2658 if (btinfo->functions.empty ())
23a7fe75
MM
2659 error (_("No trace."));
2660
2661 it->btinfo = btinfo;
f158f208 2662 it->index = 0;
23a7fe75
MM
2663}
2664
2665/* See btrace.h. */
2666
2667void
2668btrace_call_end (struct btrace_call_iterator *it,
2669 const struct btrace_thread_info *btinfo)
2670{
f158f208 2671 if (btinfo->functions.empty ())
23a7fe75
MM
2672 error (_("No trace."));
2673
2674 it->btinfo = btinfo;
f158f208 2675 it->index = btinfo->functions.size ();
23a7fe75
MM
2676}
2677
/* See btrace.h.  Advances IT by up to STRIDE function segments and returns
   the number of segments actually stepped.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();

  if (it->index + stride < length - 1)
    /* Default case: Simply advance the iterator.  */
    it->index += stride;
  else if (it->index + stride == length - 1)
    {
      /* We land exactly at the last function segment.  If it contains only one
	 instruction (i.e. the current instruction) it is not actually part of
	 the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length;
      else
	it->index = length - 1;
    }
  else
    {
      /* We land past the last function segment and have to adjust the stride.
	 If the last function segment contains only one instruction (i.e. the
	 current instruction) it is not actually part of the trace.  */
      if (btrace_ends_with_single_insn (it->btinfo))
	stride = length - it->index - 1;
      else
	stride = length - it->index;

      it->index = length;
    }

  return stride;
}
2713
/* See btrace.h.  Moves IT backwards by up to STRIDE function segments and
   returns the number of segments actually stepped.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const unsigned int length = it->btinfo->functions.size ();
  int steps = 0;

  gdb_assert (it->index <= length);

  if (stride == 0 || it->index == 0)
    return 0;

  /* If we are at the end, the first step is a special case.  If the last
     function segment contains only one instruction (i.e. the current
     instruction) it is not actually part of the trace.  To be able to step
     over this instruction, we need at least one more function segment.  */
  if ((it->index == length) && (length > 1))
    {
      if (btrace_ends_with_single_insn (it->btinfo))
	it->index = length - 2;
      else
	it->index = length - 1;

      steps = 1;
      stride -= 1;
    }

  /* Never step before the first segment.  */
  stride = std::min (stride, it->index);

  it->index -= stride;
  return steps + stride;
}
2747
2748/* See btrace.h. */
2749
2750int
2751btrace_call_cmp (const struct btrace_call_iterator *lhs,
2752 const struct btrace_call_iterator *rhs)
2753{
f158f208
TW
2754 gdb_assert (lhs->btinfo == rhs->btinfo);
2755 return (int) (lhs->index - rhs->index);
23a7fe75
MM
2756}
2757
2758/* See btrace.h. */
2759
2760int
2761btrace_find_call_by_number (struct btrace_call_iterator *it,
2762 const struct btrace_thread_info *btinfo,
2763 unsigned int number)
2764{
f158f208 2765 const unsigned int length = btinfo->functions.size ();
23a7fe75 2766
f158f208
TW
2767 if ((number == 0) || (number > length))
2768 return 0;
23a7fe75 2769
f158f208
TW
2770 it->btinfo = btinfo;
2771 it->index = number - 1;
2772 return 1;
23a7fe75
MM
2773}
2774
2775/* See btrace.h. */
2776
2777void
2778btrace_set_insn_history (struct btrace_thread_info *btinfo,
2779 const struct btrace_insn_iterator *begin,
2780 const struct btrace_insn_iterator *end)
2781{
2782 if (btinfo->insn_history == NULL)
8d749320 2783 btinfo->insn_history = XCNEW (struct btrace_insn_history);
23a7fe75
MM
2784
2785 btinfo->insn_history->begin = *begin;
2786 btinfo->insn_history->end = *end;
2787}
2788
2789/* See btrace.h. */
2790
2791void
2792btrace_set_call_history (struct btrace_thread_info *btinfo,
2793 const struct btrace_call_iterator *begin,
2794 const struct btrace_call_iterator *end)
2795{
2796 gdb_assert (begin->btinfo == end->btinfo);
2797
2798 if (btinfo->call_history == NULL)
8d749320 2799 btinfo->call_history = XCNEW (struct btrace_call_history);
23a7fe75
MM
2800
2801 btinfo->call_history->begin = *begin;
2802 btinfo->call_history->end = *end;
2803}
07bbe694
MM
2804
2805/* See btrace.h. */
2806
2807int
2808btrace_is_replaying (struct thread_info *tp)
2809{
2810 return tp->btrace.replay != NULL;
2811}
6e07b1d2
MM
2812
2813/* See btrace.h. */
2814
2815int
2816btrace_is_empty (struct thread_info *tp)
2817{
2818 struct btrace_insn_iterator begin, end;
2819 struct btrace_thread_info *btinfo;
2820
2821 btinfo = &tp->btrace;
2822
b54b03bd 2823 if (btinfo->functions.empty ())
6e07b1d2
MM
2824 return 1;
2825
2826 btrace_insn_begin (&begin, btinfo);
2827 btrace_insn_end (&end, btinfo);
2828
2829 return btrace_insn_cmp (&begin, &end) == 0;
2830}
734b0e4b
MM
2831
2832/* Forward the cleanup request. */
2833
2834static void
2835do_btrace_data_cleanup (void *arg)
2836{
9a3c8263 2837 btrace_data_fini ((struct btrace_data *) arg);
734b0e4b
MM
2838}
2839
2840/* See btrace.h. */
2841
2842struct cleanup *
2843make_cleanup_btrace_data (struct btrace_data *data)
2844{
2845 return make_cleanup (do_btrace_data_cleanup, data);
2846}
b0627500
MM
2847
2848#if defined (HAVE_LIBIPT)
2849
/* Print a single packet.  Dispatches on the libipt packet type and prints
   a short mnemonic plus the relevant payload fields.  */

static void
pt_print_packet (const struct pt_packet *packet)
{
  switch (packet->type)
    {
    default:
      /* Unknown packet type - print the raw type value.  */
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    /* The IP packets print the compression mode and the address.  */
    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    /* Taken/not-taken packets print the bit count and the payload.  */
    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
			 packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      /* Mode packets have sub-leaves.  */
      switch (packet->payload.mode.leaf)
	{
	default:
	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  printf_unfiltered (("mode.exec%s%s"),
			     packet->payload.mode.bits.exec.csl
			     ? (" cs.l") : (""),
			     packet->payload.mode.bits.exec.csd
			     ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  printf_unfiltered (("mode.tsx%s%s"),
			     packet->payload.mode.bits.tsx.intx
			     ? (" intx") : (""),
			     packet->payload.mode.bits.tsx.abrt
			     ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
			 packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
2977
/* Decode packets into MAINT using DECODER.  Collects every decoded packet
   (optionally skipping PADs) into MAINT->variant.pt.packets and warns on
   decode errors other than end-of-stream.  */

static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  /* Outer loop: re-synchronize onto the next PSB after an error.  */
  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      /* Inner loop: decode packets until the decoder reports an error or
	 the end of the stream.  */
      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  /* Optionally drop PAD packets - controlled by the
	     "maint set btrace pt skip-pad" setting.  */
	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
			     &packet);
	    }
	}

      if (errcode == -pte_eos)
	break;

      /* Record the failing packet so the error shows up in the history.  */
      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
		     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
3026
3027/* Update the packet history in BTINFO. */
3028
3029static void
3030btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
3031{
3032 volatile struct gdb_exception except;
3033 struct pt_packet_decoder *decoder;
3034 struct btrace_data_pt *pt;
3035 struct pt_config config;
3036 int errcode;
3037
3038 pt = &btinfo->data.variant.pt;
3039
3040 /* Nothing to do if there is no trace. */
3041 if (pt->size == 0)
3042 return;
3043
3044 memset (&config, 0, sizeof(config));
3045
3046 config.size = sizeof (config);
3047 config.begin = pt->data;
3048 config.end = pt->data + pt->size;
3049
3050 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
3051 config.cpu.family = pt->config.cpu.family;
3052 config.cpu.model = pt->config.cpu.model;
3053 config.cpu.stepping = pt->config.cpu.stepping;
3054
3055 errcode = pt_cpu_errata (&config.errata, &config.cpu);
3056 if (errcode < 0)
bc504a31 3057 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b0627500
MM
3058 pt_errstr (pt_errcode (errcode)));
3059
3060 decoder = pt_pkt_alloc_decoder (&config);
3061 if (decoder == NULL)
bc504a31 3062 error (_("Failed to allocate the Intel Processor Trace decoder."));
b0627500
MM
3063
3064 TRY
3065 {
3066 btrace_maint_decode_pt (&btinfo->maint, decoder);
3067 }
3068 CATCH (except, RETURN_MASK_ALL)
3069 {
3070 pt_pkt_free_decoder (decoder);
3071
3072 if (except.reason < 0)
3073 throw_exception (except);
3074 }
3075 END_CATCH
3076
3077 pt_pkt_free_decoder (decoder);
3078}
3079
3080#endif /* !defined (HAVE_LIBIPT) */
3081
3082/* Update the packet maintenance information for BTINFO and store the
3083 low and high bounds into BEGIN and END, respectively.
3084 Store the current iterator state into FROM and TO. */
3085
3086static void
3087btrace_maint_update_packets (struct btrace_thread_info *btinfo,
3088 unsigned int *begin, unsigned int *end,
3089 unsigned int *from, unsigned int *to)
3090{
3091 switch (btinfo->data.format)
3092 {
3093 default:
3094 *begin = 0;
3095 *end = 0;
3096 *from = 0;
3097 *to = 0;
3098 break;
3099
3100 case BTRACE_FORMAT_BTS:
3101 /* Nothing to do - we operate directly on BTINFO->DATA. */
3102 *begin = 0;
3103 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
3104 *from = btinfo->maint.variant.bts.packet_history.begin;
3105 *to = btinfo->maint.variant.bts.packet_history.end;
3106 break;
3107
3108#if defined (HAVE_LIBIPT)
3109 case BTRACE_FORMAT_PT:
3110 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
3111 btrace_maint_update_pt_packets (btinfo);
3112
3113 *begin = 0;
3114 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
3115 *from = btinfo->maint.variant.pt.packet_history.begin;
3116 *to = btinfo->maint.variant.pt.packet_history.end;
3117 break;
3118#endif /* defined (HAVE_LIBIPT) */
3119 }
3120}
3121
3122/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
3123 update the current iterator position. */
3124
3125static void
3126btrace_maint_print_packets (struct btrace_thread_info *btinfo,
3127 unsigned int begin, unsigned int end)
3128{
3129 switch (btinfo->data.format)
3130 {
3131 default:
3132 break;
3133
3134 case BTRACE_FORMAT_BTS:
3135 {
3136 VEC (btrace_block_s) *blocks;
3137 unsigned int blk;
3138
3139 blocks = btinfo->data.variant.bts.blocks;
3140 for (blk = begin; blk < end; ++blk)
3141 {
3142 const btrace_block_s *block;
3143
3144 block = VEC_index (btrace_block_s, blocks, blk);
3145
3146 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
3147 core_addr_to_string_nz (block->begin),
3148 core_addr_to_string_nz (block->end));
3149 }
3150
3151 btinfo->maint.variant.bts.packet_history.begin = begin;
3152 btinfo->maint.variant.bts.packet_history.end = end;
3153 }
3154 break;
3155
3156#if defined (HAVE_LIBIPT)
3157 case BTRACE_FORMAT_PT:
3158 {
3159 VEC (btrace_pt_packet_s) *packets;
3160 unsigned int pkt;
3161
3162 packets = btinfo->maint.variant.pt.packets;
3163 for (pkt = begin; pkt < end; ++pkt)
3164 {
3165 const struct btrace_pt_packet *packet;
3166
3167 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
3168
3169 printf_unfiltered ("%u\t", pkt);
3170 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
3171
3172 if (packet->errcode == pte_ok)
3173 pt_print_packet (&packet->packet);
3174 else
3175 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
3176
3177 printf_unfiltered ("\n");
3178 }
3179
3180 btinfo->maint.variant.pt.packet_history.begin = begin;
3181 btinfo->maint.variant.pt.packet_history.end = end;
3182 }
3183 break;
3184#endif /* defined (HAVE_LIBIPT) */
3185 }
3186}
3187
/* Read a number from an argument string.

   Parses an unsigned decimal number at *ARG (skipping leading
   whitespace) and advances *ARG past it.  Throws an error if *ARG does
   not start with a digit or if the number does not fit into an
   unsigned int.  */

static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  /* Cast to unsigned char: isdigit has undefined behavior for negative
     char values (CERT STR37-C).  */
  if (!isdigit ((unsigned char) *pos))
    error (_("Expected positive number, got: %s."), pos);

  /* Reset errno to detect overflow via ERANGE.  Comparing against
     UINT_MAX alone is not sufficient on hosts where unsigned long and
     unsigned int have the same width: strtoul saturates at ULONG_MAX,
     which then equals UINT_MAX and would pass the check silently.  */
  errno = 0;
  number = strtoul (pos, &end, 10);
  if (errno == ERANGE || number > UINT_MAX)
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
3210
/* Read a context size from an argument string.

   Parses a decimal number at *ARG (skipping leading whitespace) and
   advances *ARG past it.  Throws an error if *ARG does not start with
   a digit.  */

static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  /* Cast to unsigned char: isdigit has undefined behavior for negative
     char values (CERT STR37-C).  */
  if (!isdigit ((unsigned char) *pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}
3226
/* Complain about junk at the end of an argument string.

   Throws an error unless ARG points at the string terminator.  */

static void
no_chunk (char *arg)
{
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
3235
3236/* The "maintenance btrace packet-history" command. */
3237
3238static void
3239maint_btrace_packet_history_cmd (char *arg, int from_tty)
3240{
3241 struct btrace_thread_info *btinfo;
3242 struct thread_info *tp;
3243 unsigned int size, begin, end, from, to;
3244
3245 tp = find_thread_ptid (inferior_ptid);
3246 if (tp == NULL)
3247 error (_("No thread."));
3248
3249 size = 10;
3250 btinfo = &tp->btrace;
3251
3252 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
3253 if (begin == end)
3254 {
3255 printf_unfiltered (_("No trace.\n"));
3256 return;
3257 }
3258
3259 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
3260 {
3261 from = to;
3262
3263 if (end - from < size)
3264 size = end - from;
3265 to = from + size;
3266 }
3267 else if (strcmp (arg, "-") == 0)
3268 {
3269 to = from;
3270
3271 if (to - begin < size)
3272 size = to - begin;
3273 from = to - size;
3274 }
3275 else
3276 {
3277 from = get_uint (&arg);
3278 if (end <= from)
3279 error (_("'%u' is out of range."), from);
3280
3281 arg = skip_spaces (arg);
3282 if (*arg == ',')
3283 {
3284 arg = skip_spaces (++arg);
3285
3286 if (*arg == '+')
3287 {
3288 arg += 1;
3289 size = get_context_size (&arg);
3290
3291 no_chunk (arg);
3292
3293 if (end - from < size)
3294 size = end - from;
3295 to = from + size;
3296 }
3297 else if (*arg == '-')
3298 {
3299 arg += 1;
3300 size = get_context_size (&arg);
3301
3302 no_chunk (arg);
3303
3304 /* Include the packet given as first argument. */
3305 from += 1;
3306 to = from;
3307
3308 if (to - begin < size)
3309 size = to - begin;
3310 from = to - size;
3311 }
3312 else
3313 {
3314 to = get_uint (&arg);
3315
3316 /* Include the packet at the second argument and silently
3317 truncate the range. */
3318 if (to < end)
3319 to += 1;
3320 else
3321 to = end;
3322
3323 no_chunk (arg);
3324 }
3325 }
3326 else
3327 {
3328 no_chunk (arg);
3329
3330 if (end - from < size)
3331 size = end - from;
3332 to = from + size;
3333 }
3334
3335 dont_repeat ();
3336 }
3337
3338 btrace_maint_print_packets (btinfo, from, to);
3339}
3340
3341/* The "maintenance btrace clear-packet-history" command. */
3342
3343static void
3344maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
3345{
3346 struct btrace_thread_info *btinfo;
3347 struct thread_info *tp;
3348
3349 if (args != NULL && *args != 0)
3350 error (_("Invalid argument."));
3351
3352 tp = find_thread_ptid (inferior_ptid);
3353 if (tp == NULL)
3354 error (_("No thread."));
3355
3356 btinfo = &tp->btrace;
3357
3358 /* Must clear the maint data before - it depends on BTINFO->DATA. */
3359 btrace_maint_clear (btinfo);
3360 btrace_data_clear (&btinfo->data);
3361}
3362
3363/* The "maintenance btrace clear" command. */
3364
3365static void
3366maint_btrace_clear_cmd (char *args, int from_tty)
3367{
3368 struct btrace_thread_info *btinfo;
3369 struct thread_info *tp;
3370
3371 if (args != NULL && *args != 0)
3372 error (_("Invalid argument."));
3373
3374 tp = find_thread_ptid (inferior_ptid);
3375 if (tp == NULL)
3376 error (_("No thread."));
3377
3378 btrace_clear (tp);
3379}
3380
3381/* The "maintenance btrace" command. */
3382
3383static void
3384maint_btrace_cmd (char *args, int from_tty)
3385{
3386 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
3387 gdb_stdout);
3388}
3389
3390/* The "maintenance set btrace" command. */
3391
3392static void
3393maint_btrace_set_cmd (char *args, int from_tty)
3394{
3395 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
3396 gdb_stdout);
3397}
3398
3399/* The "maintenance show btrace" command. */
3400
3401static void
3402maint_btrace_show_cmd (char *args, int from_tty)
3403{
3404 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
3405 all_commands, gdb_stdout);
3406}
3407
3408/* The "maintenance set btrace pt" command. */
3409
3410static void
3411maint_btrace_pt_set_cmd (char *args, int from_tty)
3412{
3413 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3414 all_commands, gdb_stdout);
3415}
3416
3417/* The "maintenance show btrace pt" command. */
3418
3419static void
3420maint_btrace_pt_show_cmd (char *args, int from_tty)
3421{
3422 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3423 all_commands, gdb_stdout);
3424}
3425
3426/* The "maintenance info btrace" command. */
3427
3428static void
3429maint_info_btrace_cmd (char *args, int from_tty)
3430{
3431 struct btrace_thread_info *btinfo;
3432 struct thread_info *tp;
3433 const struct btrace_config *conf;
3434
3435 if (args != NULL && *args != 0)
3436 error (_("Invalid argument."));
3437
3438 tp = find_thread_ptid (inferior_ptid);
3439 if (tp == NULL)
3440 error (_("No thread."));
3441
3442 btinfo = &tp->btrace;
3443
3444 conf = btrace_conf (btinfo);
3445 if (conf == NULL)
3446 error (_("No btrace configuration."));
3447
3448 printf_unfiltered (_("Format: %s.\n"),
3449 btrace_format_string (conf->format));
3450
3451 switch (conf->format)
3452 {
3453 default:
3454 break;
3455
3456 case BTRACE_FORMAT_BTS:
3457 printf_unfiltered (_("Number of packets: %u.\n"),
3458 VEC_length (btrace_block_s,
3459 btinfo->data.variant.bts.blocks));
3460 break;
3461
3462#if defined (HAVE_LIBIPT)
3463 case BTRACE_FORMAT_PT:
3464 {
3465 struct pt_version version;
3466
3467 version = pt_library_version ();
3468 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
3469 version.minor, version.build,
3470 version.ext != NULL ? version.ext : "");
3471
3472 btrace_maint_update_pt_packets (btinfo);
3473 printf_unfiltered (_("Number of packets: %u.\n"),
3474 VEC_length (btrace_pt_packet_s,
3475 btinfo->maint.variant.pt.packets));
3476 }
3477 break;
3478#endif /* defined (HAVE_LIBIPT) */
3479 }
3480}
3481
/* The "maint show btrace pt skip-pad" show value function.  */

static void
show_maint_btrace_pt_skip_pad  (struct ui_file *file, int from_tty,
				struct cmd_list_element *c,
				const char *value)
{
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
3491
3492
3493/* Initialize btrace maintenance commands. */
3494
3495void _initialize_btrace (void);
3496void
3497_initialize_btrace (void)
3498{
3499 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
3500 _("Info about branch tracing data."), &maintenanceinfolist);
3501
3502 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
3503 _("Branch tracing maintenance commands."),
3504 &maint_btrace_cmdlist, "maintenance btrace ",
3505 0, &maintenancelist);
3506
3507 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
3508Set branch tracing specific variables."),
3509 &maint_btrace_set_cmdlist, "maintenance set btrace ",
3510 0, &maintenance_set_cmdlist);
3511
3512 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
bc504a31 3513Set Intel Processor Trace specific variables."),
b0627500
MM
3514 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
3515 0, &maint_btrace_set_cmdlist);
3516
3517 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
3518Show branch tracing specific variables."),
3519 &maint_btrace_show_cmdlist, "maintenance show btrace ",
3520 0, &maintenance_show_cmdlist);
3521
3522 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
bc504a31 3523Show Intel Processor Trace specific variables."),
b0627500
MM
3524 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
3525 0, &maint_btrace_show_cmdlist);
3526
3527 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
3528 &maint_btrace_pt_skip_pad, _("\
3529Set whether PAD packets should be skipped in the btrace packet history."), _("\
3530Show whether PAD packets should be skipped in the btrace packet history."),_("\
3531When enabled, PAD packets are ignored in the btrace packet history."),
3532 NULL, show_maint_btrace_pt_skip_pad,
3533 &maint_btrace_pt_set_cmdlist,
3534 &maint_btrace_pt_show_cmdlist);
3535
3536 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
3537 _("Print the raw branch tracing data.\n\
3538With no argument, print ten more packets after the previous ten-line print.\n\
3539With '-' as argument print ten packets before a previous ten-line print.\n\
3540One argument specifies the starting packet of a ten-line print.\n\
3541Two arguments with comma between specify starting and ending packets to \
3542print.\n\
3543Preceded with '+'/'-' the second argument specifies the distance from the \
3544first.\n"),
3545 &maint_btrace_cmdlist);
3546
3547 add_cmd ("clear-packet-history", class_maintenance,
3548 maint_btrace_clear_packet_history_cmd,
3549 _("Clears the branch tracing packet history.\n\
3550Discards the raw branch tracing data but not the execution history data.\n\
3551"),
3552 &maint_btrace_cmdlist);
3553
3554 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
3555 _("Clears the branch tracing data.\n\
3556Discards the raw branch tracing data and the execution history data.\n\
3557The next 'record' command will fetch the branch tracing data anew.\n\
3558"),
3559 &maint_btrace_cmdlist);
3560
3561}