]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/btrace.c
btrace: fix gap indication
[thirdparty/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
618f726f 3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
b20a6524 34#include "rsp-low.h"
b0627500
MM
35#include "gdbcmd.h"
36#include "cli/cli-utils.h"
b20a6524
MM
37
38#include <inttypes.h>
b0627500 39#include <ctype.h>
325fac50 40#include <algorithm>
b0627500
MM
41
/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;
/* Forward declaration; defined below.  */
static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

/* Same as DEBUG, but tagged as coming from the function-trace layer.  */
#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
02d27625
MM
68/* Return the function name of a recorded function segment for printing.
69 This function never returns NULL. */
70
71static const char *
23a7fe75 72ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
73{
74 struct minimal_symbol *msym;
75 struct symbol *sym;
76
77 msym = bfun->msym;
78 sym = bfun->sym;
79
80 if (sym != NULL)
81 return SYMBOL_PRINT_NAME (sym);
82
83 if (msym != NULL)
efd66ac6 84 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
85
86 return "<unknown>";
87}
88
89/* Return the file name of a recorded function segment for printing.
90 This function never returns NULL. */
91
92static const char *
23a7fe75 93ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
94{
95 struct symbol *sym;
96 const char *filename;
97
98 sym = bfun->sym;
99
100 if (sym != NULL)
08be3fe3 101 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
102 else
103 filename = "<unknown>";
104
105 return filename;
106}
107
23a7fe75
MM
108/* Return a string representation of the address of an instruction.
109 This function never returns NULL. */
02d27625 110
23a7fe75
MM
111static const char *
112ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 113{
23a7fe75
MM
114 if (insn == NULL)
115 return "<nil>";
116
117 return core_addr_to_string_nz (insn->pc);
02d27625
MM
118}
119
23a7fe75 120/* Print an ftrace debug status message. */
02d27625
MM
121
122static void
23a7fe75 123ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 124{
23a7fe75
MM
125 const char *fun, *file;
126 unsigned int ibegin, iend;
ce0dfbea 127 int level;
23a7fe75
MM
128
129 fun = ftrace_print_function_name (bfun);
130 file = ftrace_print_filename (bfun);
131 level = bfun->level;
132
23a7fe75
MM
133 ibegin = bfun->insn_offset;
134 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
135
ce0dfbea
MM
136 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
137 prefix, fun, file, level, ibegin, iend);
02d27625
MM
138}
139
23a7fe75
MM
140/* Return non-zero if BFUN does not match MFUN and FUN,
141 return zero otherwise. */
02d27625
MM
142
143static int
23a7fe75
MM
144ftrace_function_switched (const struct btrace_function *bfun,
145 const struct minimal_symbol *mfun,
146 const struct symbol *fun)
02d27625
MM
147{
148 struct minimal_symbol *msym;
149 struct symbol *sym;
150
02d27625
MM
151 msym = bfun->msym;
152 sym = bfun->sym;
153
154 /* If the minimal symbol changed, we certainly switched functions. */
155 if (mfun != NULL && msym != NULL
efd66ac6 156 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
02d27625
MM
157 return 1;
158
159 /* If the symbol changed, we certainly switched functions. */
160 if (fun != NULL && sym != NULL)
161 {
162 const char *bfname, *fname;
163
164 /* Check the function name. */
165 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
166 return 1;
167
168 /* Check the location of those functions, as well. */
08be3fe3
DE
169 bfname = symtab_to_fullname (symbol_symtab (sym));
170 fname = symtab_to_fullname (symbol_symtab (fun));
02d27625
MM
171 if (filename_cmp (fname, bfname) != 0)
172 return 1;
173 }
174
23a7fe75
MM
175 /* If we lost symbol information, we switched functions. */
176 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
177 return 1;
178
179 /* If we gained symbol information, we switched functions. */
180 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
181 return 1;
182
02d27625
MM
183 return 0;
184}
185
23a7fe75
MM
186/* Allocate and initialize a new branch trace function segment.
187 PREV is the chronologically preceding function segment.
188 MFUN and FUN are the symbol information we have for this function. */
189
190static struct btrace_function *
191ftrace_new_function (struct btrace_function *prev,
192 struct minimal_symbol *mfun,
193 struct symbol *fun)
194{
195 struct btrace_function *bfun;
196
8d749320 197 bfun = XCNEW (struct btrace_function);
23a7fe75
MM
198
199 bfun->msym = mfun;
200 bfun->sym = fun;
201 bfun->flow.prev = prev;
202
5de9129b
MM
203 if (prev == NULL)
204 {
205 /* Start counting at one. */
206 bfun->number = 1;
207 bfun->insn_offset = 1;
208 }
209 else
23a7fe75
MM
210 {
211 gdb_assert (prev->flow.next == NULL);
212 prev->flow.next = bfun;
02d27625 213
23a7fe75
MM
214 bfun->number = prev->number + 1;
215 bfun->insn_offset = (prev->insn_offset
216 + VEC_length (btrace_insn_s, prev->insn));
31fd9caa 217 bfun->level = prev->level;
23a7fe75
MM
218 }
219
220 return bfun;
02d27625
MM
221}
222
23a7fe75 223/* Update the UP field of a function segment. */
02d27625 224
23a7fe75
MM
225static void
226ftrace_update_caller (struct btrace_function *bfun,
227 struct btrace_function *caller,
228 enum btrace_function_flag flags)
02d27625 229{
23a7fe75
MM
230 if (bfun->up != NULL)
231 ftrace_debug (bfun, "updating caller");
02d27625 232
23a7fe75
MM
233 bfun->up = caller;
234 bfun->flags = flags;
235
236 ftrace_debug (bfun, "set caller");
237}
238
239/* Fix up the caller for all segments of a function. */
240
241static void
242ftrace_fixup_caller (struct btrace_function *bfun,
243 struct btrace_function *caller,
244 enum btrace_function_flag flags)
245{
246 struct btrace_function *prev, *next;
247
248 ftrace_update_caller (bfun, caller, flags);
249
250 /* Update all function segments belonging to the same function. */
251 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
252 ftrace_update_caller (prev, caller, flags);
253
254 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
255 ftrace_update_caller (next, caller, flags);
256}
257
258/* Add a new function segment for a call.
259 CALLER is the chronologically preceding function segment.
260 MFUN and FUN are the symbol information we have for this function. */
261
262static struct btrace_function *
263ftrace_new_call (struct btrace_function *caller,
264 struct minimal_symbol *mfun,
265 struct symbol *fun)
266{
267 struct btrace_function *bfun;
268
269 bfun = ftrace_new_function (caller, mfun, fun);
270 bfun->up = caller;
31fd9caa 271 bfun->level += 1;
23a7fe75
MM
272
273 ftrace_debug (bfun, "new call");
274
275 return bfun;
276}
277
278/* Add a new function segment for a tail call.
279 CALLER is the chronologically preceding function segment.
280 MFUN and FUN are the symbol information we have for this function. */
281
282static struct btrace_function *
283ftrace_new_tailcall (struct btrace_function *caller,
284 struct minimal_symbol *mfun,
285 struct symbol *fun)
286{
287 struct btrace_function *bfun;
02d27625 288
23a7fe75
MM
289 bfun = ftrace_new_function (caller, mfun, fun);
290 bfun->up = caller;
31fd9caa 291 bfun->level += 1;
23a7fe75 292 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 293
23a7fe75
MM
294 ftrace_debug (bfun, "new tail call");
295
296 return bfun;
297}
298
299/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
300 symbol information. */
301
302static struct btrace_function *
303ftrace_find_caller (struct btrace_function *bfun,
304 struct minimal_symbol *mfun,
305 struct symbol *fun)
306{
307 for (; bfun != NULL; bfun = bfun->up)
308 {
309 /* Skip functions with incompatible symbol information. */
310 if (ftrace_function_switched (bfun, mfun, fun))
311 continue;
312
313 /* This is the function segment we're looking for. */
314 break;
315 }
316
317 return bfun;
318}
319
320/* Find the innermost caller in the back trace of BFUN, skipping all
321 function segments that do not end with a call instruction (e.g.
322 tail calls ending with a jump). */
323
324static struct btrace_function *
7d5c24b3 325ftrace_find_call (struct btrace_function *bfun)
23a7fe75
MM
326{
327 for (; bfun != NULL; bfun = bfun->up)
02d27625 328 {
23a7fe75 329 struct btrace_insn *last;
02d27625 330
31fd9caa
MM
331 /* Skip gaps. */
332 if (bfun->errcode != 0)
333 continue;
23a7fe75
MM
334
335 last = VEC_last (btrace_insn_s, bfun->insn);
02d27625 336
7d5c24b3 337 if (last->iclass == BTRACE_INSN_CALL)
23a7fe75
MM
338 break;
339 }
340
341 return bfun;
342}
343
344/* Add a continuation segment for a function into which we return.
345 PREV is the chronologically preceding function segment.
346 MFUN and FUN are the symbol information we have for this function. */
347
348static struct btrace_function *
7d5c24b3 349ftrace_new_return (struct btrace_function *prev,
23a7fe75
MM
350 struct minimal_symbol *mfun,
351 struct symbol *fun)
352{
353 struct btrace_function *bfun, *caller;
354
355 bfun = ftrace_new_function (prev, mfun, fun);
356
357 /* It is important to start at PREV's caller. Otherwise, we might find
358 PREV itself, if PREV is a recursive function. */
359 caller = ftrace_find_caller (prev->up, mfun, fun);
360 if (caller != NULL)
361 {
362 /* The caller of PREV is the preceding btrace function segment in this
363 function instance. */
364 gdb_assert (caller->segment.next == NULL);
365
366 caller->segment.next = bfun;
367 bfun->segment.prev = caller;
368
369 /* Maintain the function level. */
370 bfun->level = caller->level;
371
372 /* Maintain the call stack. */
373 bfun->up = caller->up;
374 bfun->flags = caller->flags;
375
376 ftrace_debug (bfun, "new return");
377 }
378 else
379 {
380 /* We did not find a caller. This could mean that something went
381 wrong or that the call is simply not included in the trace. */
02d27625 382
23a7fe75 383 /* Let's search for some actual call. */
7d5c24b3 384 caller = ftrace_find_call (prev->up);
23a7fe75 385 if (caller == NULL)
02d27625 386 {
23a7fe75
MM
387 /* There is no call in PREV's back trace. We assume that the
388 branch trace did not include it. */
389
390 /* Let's find the topmost call function - this skips tail calls. */
391 while (prev->up != NULL)
392 prev = prev->up;
02d27625 393
23a7fe75
MM
394 /* We maintain levels for a series of returns for which we have
395 not seen the calls.
396 We start at the preceding function's level in case this has
397 already been a return for which we have not seen the call.
398 We start at level 0 otherwise, to handle tail calls correctly. */
325fac50 399 bfun->level = std::min (0, prev->level) - 1;
23a7fe75
MM
400
401 /* Fix up the call stack for PREV. */
402 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
403
404 ftrace_debug (bfun, "new return - no caller");
405 }
406 else
02d27625 407 {
23a7fe75
MM
408 /* There is a call in PREV's back trace to which we should have
409 returned. Let's remain at this level. */
410 bfun->level = prev->level;
02d27625 411
23a7fe75 412 ftrace_debug (bfun, "new return - unknown caller");
02d27625 413 }
23a7fe75
MM
414 }
415
416 return bfun;
417}
418
419/* Add a new function segment for a function switch.
420 PREV is the chronologically preceding function segment.
421 MFUN and FUN are the symbol information we have for this function. */
422
423static struct btrace_function *
424ftrace_new_switch (struct btrace_function *prev,
425 struct minimal_symbol *mfun,
426 struct symbol *fun)
427{
428 struct btrace_function *bfun;
429
430 /* This is an unexplained function switch. The call stack will likely
431 be wrong at this point. */
432 bfun = ftrace_new_function (prev, mfun, fun);
02d27625 433
23a7fe75
MM
434 ftrace_debug (bfun, "new switch");
435
436 return bfun;
437}
438
31fd9caa
MM
439/* Add a new function segment for a gap in the trace due to a decode error.
440 PREV is the chronologically preceding function segment.
441 ERRCODE is the format-specific error code. */
442
443static struct btrace_function *
444ftrace_new_gap (struct btrace_function *prev, int errcode)
445{
446 struct btrace_function *bfun;
447
448 /* We hijack prev if it was empty. */
449 if (prev != NULL && prev->errcode == 0
450 && VEC_empty (btrace_insn_s, prev->insn))
451 bfun = prev;
452 else
453 bfun = ftrace_new_function (prev, NULL, NULL);
454
455 bfun->errcode = errcode;
456
457 ftrace_debug (bfun, "new gap");
458
459 return bfun;
460}
461
23a7fe75
MM
462/* Update BFUN with respect to the instruction at PC. This may create new
463 function segments.
464 Return the chronologically latest function segment, never NULL. */
465
466static struct btrace_function *
7d5c24b3 467ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
23a7fe75
MM
468{
469 struct bound_minimal_symbol bmfun;
470 struct minimal_symbol *mfun;
471 struct symbol *fun;
472 struct btrace_insn *last;
473
474 /* Try to determine the function we're in. We use both types of symbols
475 to avoid surprises when we sometimes get a full symbol and sometimes
476 only a minimal symbol. */
477 fun = find_pc_function (pc);
478 bmfun = lookup_minimal_symbol_by_pc (pc);
479 mfun = bmfun.minsym;
480
481 if (fun == NULL && mfun == NULL)
482 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
483
31fd9caa
MM
484 /* If we didn't have a function or if we had a gap before, we create one. */
485 if (bfun == NULL || bfun->errcode != 0)
23a7fe75
MM
486 return ftrace_new_function (bfun, mfun, fun);
487
488 /* Check the last instruction, if we have one.
489 We do this check first, since it allows us to fill in the call stack
490 links in addition to the normal flow links. */
491 last = NULL;
492 if (!VEC_empty (btrace_insn_s, bfun->insn))
493 last = VEC_last (btrace_insn_s, bfun->insn);
494
495 if (last != NULL)
496 {
7d5c24b3
MM
497 switch (last->iclass)
498 {
499 case BTRACE_INSN_RETURN:
986b6601
MM
500 {
501 const char *fname;
502
503 /* On some systems, _dl_runtime_resolve returns to the resolved
504 function instead of jumping to it. From our perspective,
505 however, this is a tailcall.
506 If we treated it as return, we wouldn't be able to find the
507 resolved function in our stack back trace. Hence, we would
508 lose the current stack back trace and start anew with an empty
509 back trace. When the resolved function returns, we would then
510 create a stack back trace with the same function names but
511 different frame id's. This will confuse stepping. */
512 fname = ftrace_print_function_name (bfun);
513 if (strcmp (fname, "_dl_runtime_resolve") == 0)
514 return ftrace_new_tailcall (bfun, mfun, fun);
515
516 return ftrace_new_return (bfun, mfun, fun);
517 }
23a7fe75 518
7d5c24b3
MM
519 case BTRACE_INSN_CALL:
520 /* Ignore calls to the next instruction. They are used for PIC. */
521 if (last->pc + last->size == pc)
522 break;
23a7fe75 523
7d5c24b3 524 return ftrace_new_call (bfun, mfun, fun);
23a7fe75 525
7d5c24b3
MM
526 case BTRACE_INSN_JUMP:
527 {
528 CORE_ADDR start;
23a7fe75 529
7d5c24b3 530 start = get_pc_function_start (pc);
23a7fe75 531
7d5c24b3
MM
532 /* If we can't determine the function for PC, we treat a jump at
533 the end of the block as tail call. */
534 if (start == 0 || start == pc)
535 return ftrace_new_tailcall (bfun, mfun, fun);
536 }
02d27625 537 }
23a7fe75
MM
538 }
539
540 /* Check if we're switching functions for some other reason. */
541 if (ftrace_function_switched (bfun, mfun, fun))
542 {
543 DEBUG_FTRACE ("switching from %s in %s at %s",
544 ftrace_print_insn_addr (last),
545 ftrace_print_function_name (bfun),
546 ftrace_print_filename (bfun));
02d27625 547
23a7fe75
MM
548 return ftrace_new_switch (bfun, mfun, fun);
549 }
550
551 return bfun;
552}
553
23a7fe75
MM
554/* Add the instruction at PC to BFUN's instructions. */
555
556static void
7d5c24b3
MM
557ftrace_update_insns (struct btrace_function *bfun,
558 const struct btrace_insn *insn)
23a7fe75 559{
7d5c24b3 560 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
23a7fe75
MM
561
562 if (record_debug > 1)
563 ftrace_debug (bfun, "update insn");
564}
565
7d5c24b3
MM
566/* Classify the instruction at PC. */
567
568static enum btrace_insn_class
569ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
570{
7d5c24b3
MM
571 enum btrace_insn_class iclass;
572
573 iclass = BTRACE_INSN_OTHER;
492d29ea 574 TRY
7d5c24b3
MM
575 {
576 if (gdbarch_insn_is_call (gdbarch, pc))
577 iclass = BTRACE_INSN_CALL;
578 else if (gdbarch_insn_is_ret (gdbarch, pc))
579 iclass = BTRACE_INSN_RETURN;
580 else if (gdbarch_insn_is_jump (gdbarch, pc))
581 iclass = BTRACE_INSN_JUMP;
582 }
492d29ea
PA
583 CATCH (error, RETURN_MASK_ERROR)
584 {
585 }
586 END_CATCH
7d5c24b3
MM
587
588 return iclass;
589}
590
734b0e4b 591/* Compute the function branch trace from BTS trace. */
23a7fe75
MM
592
593static void
76235df1 594btrace_compute_ftrace_bts (struct thread_info *tp,
734b0e4b 595 const struct btrace_data_bts *btrace)
23a7fe75 596{
76235df1 597 struct btrace_thread_info *btinfo;
23a7fe75
MM
598 struct btrace_function *begin, *end;
599 struct gdbarch *gdbarch;
31fd9caa 600 unsigned int blk, ngaps;
23a7fe75
MM
601 int level;
602
23a7fe75 603 gdbarch = target_gdbarch ();
76235df1 604 btinfo = &tp->btrace;
969c39fb
MM
605 begin = btinfo->begin;
606 end = btinfo->end;
31fd9caa 607 ngaps = btinfo->ngaps;
969c39fb 608 level = begin != NULL ? -btinfo->level : INT_MAX;
734b0e4b 609 blk = VEC_length (btrace_block_s, btrace->blocks);
23a7fe75
MM
610
611 while (blk != 0)
612 {
613 btrace_block_s *block;
614 CORE_ADDR pc;
615
616 blk -= 1;
617
734b0e4b 618 block = VEC_index (btrace_block_s, btrace->blocks, blk);
23a7fe75
MM
619 pc = block->begin;
620
621 for (;;)
622 {
7d5c24b3 623 struct btrace_insn insn;
23a7fe75
MM
624 int size;
625
626 /* We should hit the end of the block. Warn if we went too far. */
627 if (block->end < pc)
628 {
31fd9caa
MM
629 /* Indicate the gap in the trace - unless we're at the
630 beginning. */
631 if (begin != NULL)
632 {
31fd9caa
MM
633 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
634 ngaps += 1;
63ab433e
MM
635
636 warning (_("Recorded trace may be corrupted at instruction "
637 "%u (pc = %s)."), end->insn_offset - 1,
638 core_addr_to_string_nz (pc));
31fd9caa 639 }
23a7fe75
MM
640 break;
641 }
642
7d5c24b3 643 end = ftrace_update_function (end, pc);
23a7fe75
MM
644 if (begin == NULL)
645 begin = end;
646
8710b709
MM
647 /* Maintain the function level offset.
648 For all but the last block, we do it here. */
649 if (blk != 0)
325fac50 650 level = std::min (level, end->level);
23a7fe75 651
7d5c24b3 652 size = 0;
492d29ea
PA
653 TRY
654 {
655 size = gdb_insn_length (gdbarch, pc);
656 }
657 CATCH (error, RETURN_MASK_ERROR)
658 {
659 }
660 END_CATCH
7d5c24b3
MM
661
662 insn.pc = pc;
663 insn.size = size;
664 insn.iclass = ftrace_classify_insn (gdbarch, pc);
da8c46d2 665 insn.flags = 0;
7d5c24b3
MM
666
667 ftrace_update_insns (end, &insn);
23a7fe75
MM
668
669 /* We're done once we pushed the instruction at the end. */
670 if (block->end == pc)
671 break;
672
7d5c24b3 673 /* We can't continue if we fail to compute the size. */
23a7fe75
MM
674 if (size <= 0)
675 {
31fd9caa
MM
676 /* Indicate the gap in the trace. We just added INSN so we're
677 not at the beginning. */
678 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
679 ngaps += 1;
680
63ab433e
MM
681 warning (_("Recorded trace may be incomplete at instruction %u "
682 "(pc = %s)."), end->insn_offset - 1,
683 core_addr_to_string_nz (pc));
684
23a7fe75
MM
685 break;
686 }
687
688 pc += size;
8710b709
MM
689
690 /* Maintain the function level offset.
691 For the last block, we do it here to not consider the last
692 instruction.
693 Since the last instruction corresponds to the current instruction
694 and is not really part of the execution history, it shouldn't
695 affect the level. */
696 if (blk == 0)
325fac50 697 level = std::min (level, end->level);
23a7fe75 698 }
02d27625
MM
699 }
700
23a7fe75
MM
701 btinfo->begin = begin;
702 btinfo->end = end;
31fd9caa 703 btinfo->ngaps = ngaps;
23a7fe75
MM
704
705 /* LEVEL is the minimal function level of all btrace function segments.
706 Define the global level offset to -LEVEL so all function levels are
707 normalized to start at zero. */
708 btinfo->level = -level;
02d27625
MM
709}
710
b20a6524
MM
711#if defined (HAVE_LIBIPT)
712
713static enum btrace_insn_class
714pt_reclassify_insn (enum pt_insn_class iclass)
715{
716 switch (iclass)
717 {
718 case ptic_call:
719 return BTRACE_INSN_CALL;
720
721 case ptic_return:
722 return BTRACE_INSN_RETURN;
723
724 case ptic_jump:
725 return BTRACE_INSN_JUMP;
726
727 default:
728 return BTRACE_INSN_OTHER;
729 }
730}
731
da8c46d2
MM
732/* Return the btrace instruction flags for INSN. */
733
d7abe101 734static btrace_insn_flags
da8c46d2
MM
735pt_btrace_insn_flags (const struct pt_insn *insn)
736{
d7abe101 737 btrace_insn_flags flags = 0;
da8c46d2
MM
738
739 if (insn->speculative)
740 flags |= BTRACE_INSN_FLAG_SPECULATIVE;
741
742 return flags;
743}
744
b20a6524
MM
745/* Add function branch trace using DECODER. */
746
747static void
748ftrace_add_pt (struct pt_insn_decoder *decoder,
749 struct btrace_function **pbegin,
750 struct btrace_function **pend, int *plevel,
751 unsigned int *ngaps)
752{
753 struct btrace_function *begin, *end, *upd;
754 uint64_t offset;
63ab433e 755 int errcode;
b20a6524
MM
756
757 begin = *pbegin;
758 end = *pend;
b20a6524
MM
759 for (;;)
760 {
761 struct btrace_insn btinsn;
762 struct pt_insn insn;
763
764 errcode = pt_insn_sync_forward (decoder);
765 if (errcode < 0)
766 {
767 if (errcode != -pte_eos)
bc504a31 768 warning (_("Failed to synchronize onto the Intel Processor "
b20a6524
MM
769 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
770 break;
771 }
772
773 memset (&btinsn, 0, sizeof (btinsn));
774 for (;;)
775 {
776 errcode = pt_insn_next (decoder, &insn, sizeof(insn));
777 if (errcode < 0)
778 break;
779
780 /* Look for gaps in the trace - unless we're at the beginning. */
781 if (begin != NULL)
782 {
783 /* Tracing is disabled and re-enabled each time we enter the
784 kernel. Most times, we continue from the same instruction we
785 stopped before. This is indicated via the RESUMED instruction
786 flag. The ENABLED instruction flag means that we continued
787 from some other instruction. Indicate this as a trace gap. */
788 if (insn.enabled)
63ab433e
MM
789 {
790 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
791 *ngaps += 1;
792
793 pt_insn_get_offset (decoder, &offset);
794
795 warning (_("Non-contiguous trace at instruction %u (offset "
796 "= 0x%" PRIx64 ", pc = 0x%" PRIx64 ")."),
797 end->insn_offset - 1, offset, insn.ip);
798 }
b20a6524
MM
799
800 /* Indicate trace overflows. */
801 if (insn.resynced)
63ab433e
MM
802 {
803 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
804 *ngaps += 1;
805
806 pt_insn_get_offset (decoder, &offset);
807
808 warning (_("Overflow at instruction %u (offset = 0x%" PRIx64
809 ", pc = 0x%" PRIx64 ")."), end->insn_offset - 1,
810 offset, insn.ip);
811 }
b20a6524
MM
812 }
813
814 upd = ftrace_update_function (end, insn.ip);
815 if (upd != end)
816 {
817 *pend = end = upd;
818
819 if (begin == NULL)
820 *pbegin = begin = upd;
821 }
822
823 /* Maintain the function level offset. */
325fac50 824 *plevel = std::min (*plevel, end->level);
b20a6524
MM
825
826 btinsn.pc = (CORE_ADDR) insn.ip;
827 btinsn.size = (gdb_byte) insn.size;
828 btinsn.iclass = pt_reclassify_insn (insn.iclass);
da8c46d2 829 btinsn.flags = pt_btrace_insn_flags (&insn);
b20a6524
MM
830
831 ftrace_update_insns (end, &btinsn);
832 }
833
834 if (errcode == -pte_eos)
835 break;
836
837 /* If the gap is at the very beginning, we ignore it - we will have
838 less trace, but we won't have any holes in the trace. */
839 if (begin == NULL)
840 continue;
841
b20a6524
MM
842 /* Indicate the gap in the trace. */
843 *pend = end = ftrace_new_gap (end, errcode);
844 *ngaps += 1;
b20a6524 845
63ab433e
MM
846 pt_insn_get_offset (decoder, &offset);
847
848 warning (_("Decode error (%d) at instruction %u (offset = 0x%" PRIx64
849 ", pc = 0x%" PRIx64 "): %s."), errcode, end->insn_offset - 1,
850 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
851 }
b20a6524
MM
852}
853
854/* A callback function to allow the trace decoder to read the inferior's
855 memory. */
856
857static int
858btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
80a2b330 859 const struct pt_asid *asid, uint64_t pc,
b20a6524
MM
860 void *context)
861{
43368e1d 862 int result, errcode;
b20a6524 863
43368e1d 864 result = (int) size;
b20a6524
MM
865 TRY
866 {
80a2b330 867 errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
b20a6524 868 if (errcode != 0)
43368e1d 869 result = -pte_nomap;
b20a6524
MM
870 }
871 CATCH (error, RETURN_MASK_ERROR)
872 {
43368e1d 873 result = -pte_nomap;
b20a6524
MM
874 }
875 END_CATCH
876
43368e1d 877 return result;
b20a6524
MM
878}
879
880/* Translate the vendor from one enum to another. */
881
882static enum pt_cpu_vendor
883pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
884{
885 switch (vendor)
886 {
887 default:
888 return pcv_unknown;
889
890 case CV_INTEL:
891 return pcv_intel;
892 }
893}
894
895/* Finalize the function branch trace after decode. */
896
897static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
898 struct thread_info *tp, int level)
899{
900 pt_insn_free_decoder (decoder);
901
902 /* LEVEL is the minimal function level of all btrace function segments.
903 Define the global level offset to -LEVEL so all function levels are
904 normalized to start at zero. */
905 tp->btrace.level = -level;
906
907 /* Add a single last instruction entry for the current PC.
908 This allows us to compute the backtrace at the current PC using both
909 standard unwind and btrace unwind.
910 This extra entry is ignored by all record commands. */
911 btrace_add_pc (tp);
912}
913
bc504a31
PA
914/* Compute the function branch trace from Intel Processor Trace
915 format. */
b20a6524
MM
916
917static void
918btrace_compute_ftrace_pt (struct thread_info *tp,
919 const struct btrace_data_pt *btrace)
920{
921 struct btrace_thread_info *btinfo;
922 struct pt_insn_decoder *decoder;
923 struct pt_config config;
924 int level, errcode;
925
926 if (btrace->size == 0)
927 return;
928
929 btinfo = &tp->btrace;
930 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
931
932 pt_config_init(&config);
933 config.begin = btrace->data;
934 config.end = btrace->data + btrace->size;
935
936 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
937 config.cpu.family = btrace->config.cpu.family;
938 config.cpu.model = btrace->config.cpu.model;
939 config.cpu.stepping = btrace->config.cpu.stepping;
940
941 errcode = pt_cpu_errata (&config.errata, &config.cpu);
942 if (errcode < 0)
bc504a31 943 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b20a6524
MM
944 pt_errstr (pt_errcode (errcode)));
945
946 decoder = pt_insn_alloc_decoder (&config);
947 if (decoder == NULL)
bc504a31 948 error (_("Failed to allocate the Intel Processor Trace decoder."));
b20a6524
MM
949
950 TRY
951 {
952 struct pt_image *image;
953
954 image = pt_insn_get_image(decoder);
955 if (image == NULL)
bc504a31 956 error (_("Failed to configure the Intel Processor Trace decoder."));
b20a6524
MM
957
958 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
959 if (errcode < 0)
bc504a31 960 error (_("Failed to configure the Intel Processor Trace decoder: "
b20a6524
MM
961 "%s."), pt_errstr (pt_errcode (errcode)));
962
963 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
964 &btinfo->ngaps);
965 }
966 CATCH (error, RETURN_MASK_ALL)
967 {
968 /* Indicate a gap in the trace if we quit trace processing. */
969 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
970 {
971 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
972 btinfo->ngaps++;
973 }
974
975 btrace_finalize_ftrace_pt (decoder, tp, level);
976
977 throw_exception (error);
978 }
979 END_CATCH
980
981 btrace_finalize_ftrace_pt (decoder, tp, level);
982}
983
984#else /* defined (HAVE_LIBIPT) */
985
986static void
987btrace_compute_ftrace_pt (struct thread_info *tp,
988 const struct btrace_data_pt *btrace)
989{
990 internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
991}
992
993#endif /* defined (HAVE_LIBIPT) */
994
734b0e4b
MM
995/* Compute the function branch trace from a block branch trace BTRACE for
996 a thread given by BTINFO. */
997
998static void
76235df1 999btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
734b0e4b
MM
1000{
1001 DEBUG ("compute ftrace");
1002
1003 switch (btrace->format)
1004 {
1005 case BTRACE_FORMAT_NONE:
1006 return;
1007
1008 case BTRACE_FORMAT_BTS:
76235df1 1009 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
734b0e4b 1010 return;
b20a6524
MM
1011
1012 case BTRACE_FORMAT_PT:
1013 btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
1014 return;
734b0e4b
MM
1015 }
1016
1017 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1018}
1019
6e07b1d2
MM
1020/* Add an entry for the current PC. */
1021
1022static void
1023btrace_add_pc (struct thread_info *tp)
1024{
734b0e4b 1025 struct btrace_data btrace;
6e07b1d2
MM
1026 struct btrace_block *block;
1027 struct regcache *regcache;
1028 struct cleanup *cleanup;
1029 CORE_ADDR pc;
1030
1031 regcache = get_thread_regcache (tp->ptid);
1032 pc = regcache_read_pc (regcache);
1033
734b0e4b
MM
1034 btrace_data_init (&btrace);
1035 btrace.format = BTRACE_FORMAT_BTS;
1036 btrace.variant.bts.blocks = NULL;
6e07b1d2 1037
734b0e4b
MM
1038 cleanup = make_cleanup_btrace_data (&btrace);
1039
1040 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
6e07b1d2
MM
1041 block->begin = pc;
1042 block->end = pc;
1043
76235df1 1044 btrace_compute_ftrace (tp, &btrace);
6e07b1d2
MM
1045
1046 do_cleanups (cleanup);
1047}
1048
02d27625
MM
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  /* Already enabled for this thread; nothing to do.  */
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  /* Without libipt we could record PT but never decode it; reject early.  */
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}
1075
1076/* See btrace.h. */
1077
f4abbc16
MM
1078const struct btrace_config *
1079btrace_conf (const struct btrace_thread_info *btinfo)
1080{
1081 if (btinfo->target == NULL)
1082 return NULL;
1083
1084 return target_btrace_conf (btinfo->target);
1085}
1086
1087/* See btrace.h. */
1088
02d27625
MM
1089void
1090btrace_disable (struct thread_info *tp)
1091{
1092 struct btrace_thread_info *btp = &tp->btrace;
1093 int errcode = 0;
1094
1095 if (btp->target == NULL)
1096 return;
1097
43792cf0
PA
1098 DEBUG ("disable thread %s (%s)", print_thread_id (tp),
1099 target_pid_to_str (tp->ptid));
02d27625
MM
1100
1101 target_disable_btrace (btp->target);
1102 btp->target = NULL;
1103
1104 btrace_clear (tp);
1105}
1106
1107/* See btrace.h. */
1108
1109void
1110btrace_teardown (struct thread_info *tp)
1111{
1112 struct btrace_thread_info *btp = &tp->btrace;
1113 int errcode = 0;
1114
1115 if (btp->target == NULL)
1116 return;
1117
43792cf0
PA
1118 DEBUG ("teardown thread %s (%s)", print_thread_id (tp),
1119 target_pid_to_str (tp->ptid));
02d27625
MM
1120
1121 target_teardown_btrace (btp->target);
1122 btp->target = NULL;
1123
1124 btrace_clear (tp);
1125}
1126
/* Stitch branch trace in BTS format.  BTRACE is the new delta trace;
   TP is the traced thread whose existing trace we extend.
   Returns 0 on success, -1 to request a full re-read.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace. Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
1211
734b0e4b
MM
1212/* Adjust the block trace in order to stitch old and new trace together.
1213 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
1214 TP is the traced thread.
1215 May modifx BTRACE as well as the existing trace in TP.
734b0e4b
MM
1216 Return 0 on success, -1 otherwise. */
1217
1218static int
31fd9caa 1219btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1220{
1221 /* If we don't have trace, there's nothing to do. */
1222 if (btrace_data_empty (btrace))
1223 return 0;
1224
1225 switch (btrace->format)
1226 {
1227 case BTRACE_FORMAT_NONE:
1228 return 0;
1229
1230 case BTRACE_FORMAT_BTS:
31fd9caa 1231 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1232
1233 case BTRACE_FORMAT_PT:
1234 /* Delta reads are not supported. */
1235 return -1;
734b0e4b
MM
1236 }
1237
1238 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1239}
1240
969c39fb
MM
1241/* Clear the branch trace histories in BTINFO. */
1242
1243static void
1244btrace_clear_history (struct btrace_thread_info *btinfo)
1245{
1246 xfree (btinfo->insn_history);
1247 xfree (btinfo->call_history);
1248 xfree (btinfo->replay);
1249
1250 btinfo->insn_history = NULL;
1251 btinfo->call_history = NULL;
1252 btinfo->replay = NULL;
1253}
1254
b0627500
MM
/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      /* BTS keeps no allocated packet data; resetting the history range
	 is sufficient.  */
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      /* The PT packet vector is heap-allocated; free it before resetting
	 the history range.  */
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
1281
02d27625
MM
/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      /* Read strategy: DELTA first, then NEW, then a full re-read.  */
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
	 btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}
1357
/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %s (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  /* Free the linked list of function segments; save the next pointer
     before freeing each node.  */
  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}
1393
/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* Discard the trace of every live thread; traces may reference symbols
     from the objfile being freed — TODO confirm against callers.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
c12a2917
MM
1406
1407#if defined (HAVE_LIBEXPAT)
1408
/* Check the btrace document version.  Only version "1.0" is accepted;
   anything else raises an XML parse error.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}
1422
/* Parse a btrace "block" xml record.  USER_DATA is the btrace_data
   being filled in.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  /* The first block record switches the container to BTS format;
     subsequent blocks must find it in BTS format already.  */
  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
1457
b20a6524
MM
/* Parse a "raw" xml record.  Decodes the hex-encoded BODY_TEXT into a
   freshly allocated buffer returned via *PDATA with its size in *PSIZE.
   Ownership of the buffer transfers to the caller.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  /* Two hex digits encode one byte, so the text length must be even.  */
  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  /* Success; keep the buffer alive for the caller.  */
  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}
1497
/* Parse a btrace pt-config "cpu" xml record.  Fills in the cpu
   identification of the PT configuration in USER_DATA.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  /* Only Intel is recognized; any other vendor string leaves the vendor
     field as initialized by parse_xml_btrace_pt (CV_UNKNOWN).  */
  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}
1524
1525/* Parse a btrace pt "raw" xml record. */
1526
1527static void
1528parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1529 const struct gdb_xml_element *element,
1530 void *user_data, const char *body_text)
1531{
1532 struct btrace_data *btrace;
1533
9a3c8263 1534 btrace = (struct btrace_data *) user_data;
b20a6524
MM
1535 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1536 &btrace->variant.pt.size);
1537}
1538
1539/* Parse a btrace "pt" xml record. */
1540
1541static void
1542parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1543 const struct gdb_xml_element *element,
1544 void *user_data, VEC (gdb_xml_value_s) *attributes)
1545{
1546 struct btrace_data *btrace;
1547
9a3c8263 1548 btrace = (struct btrace_data *) user_data;
b20a6524
MM
1549 btrace->format = BTRACE_FORMAT_PT;
1550 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1551 btrace->variant.pt.data = NULL;
1552 btrace->variant.pt.size = 0;
1553}
1554
c12a2917
MM
/* Attributes of a btrace "block" record.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a pt-config "cpu" record.  */

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of a "pt-config" record.  */

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Children of a "pt" record.  */

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of the top-level "btrace" element.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Top-level element table for a btrace document.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1600
1601#endif /* defined (HAVE_LIBEXPAT) */
1602
/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* Clean up partially filled data if the parse fails below.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace. XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
23a7fe75 1630
f4abbc16
MM
1631#if defined (HAVE_LIBEXPAT)
1632
/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  /* The size attribute is optional; the default of zero stands when it
     is absent.  */
  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}
1651
b20a6524
MM
1652/* Parse a btrace-conf "pt" xml record. */
1653
1654static void
1655parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
1656 const struct gdb_xml_element *element,
1657 void *user_data, VEC (gdb_xml_value_s) *attributes)
1658{
1659 struct btrace_config *conf;
1660 struct gdb_xml_value *size;
1661
9a3c8263 1662 conf = (struct btrace_config *) user_data;
b20a6524
MM
1663 conf->format = BTRACE_FORMAT_PT;
1664 conf->pt.size = 0;
1665
1666 size = xml_find_attribute (attributes, "size");
1667 if (size != NULL)
1668 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
1669}
1670
/* Attributes of a btrace-conf "pt" record.  */

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a btrace-conf "bts" record.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of the top-level "btrace-conf" element.  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace-conf" element.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Top-level element table for a btrace-conf document.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1699
1700#endif /* defined (HAVE_LIBEXPAT) */
1701
/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1722
23a7fe75
MM
/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}
1745
1746/* See btrace.h. */
1747
1748unsigned int
1749btrace_insn_number (const struct btrace_insn_iterator *it)
1750{
1751 const struct btrace_function *bfun;
1752
1753 bfun = it->function;
31fd9caa
MM
1754
1755 /* Return zero if the iterator points to a gap in the trace. */
1756 if (bfun->errcode != 0)
1757 return 0;
1758
23a7fe75
MM
1759 return bfun->insn_offset + it->index;
1760}
1761
1762/* See btrace.h. */
1763
1764void
1765btrace_insn_begin (struct btrace_insn_iterator *it,
1766 const struct btrace_thread_info *btinfo)
1767{
1768 const struct btrace_function *bfun;
1769
1770 bfun = btinfo->begin;
1771 if (bfun == NULL)
1772 error (_("No trace."));
1773
1774 it->function = bfun;
1775 it->index = 0;
1776}
1777
/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}
1802
/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1884
/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = std::min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1942
/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
	lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
	rnum -= 1;
    }

  /* The difference is computed in unsigned arithmetic and reinterpreted
     as int to get the three-way comparison sign.  */
  return (int) (lnum - rnum);
}
1987
/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  /* Walk backwards to the first non-gap segment whose instruction range
     could contain NUMBER.  */
  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      if (bfun->insn_offset <= number)
	break;
    }

  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  /* NUMBER lies beyond the last instruction of this segment.  */
  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}
2023
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  /* NULL designates the end iterator.  */
  return it->function;
}
2031
/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
2060
2061/* See btrace.h. */
2062
2063void
2064btrace_call_begin (struct btrace_call_iterator *it,
2065 const struct btrace_thread_info *btinfo)
2066{
2067 const struct btrace_function *bfun;
2068
2069 bfun = btinfo->begin;
2070 if (bfun == NULL)
2071 error (_("No trace."));
2072
2073 it->btinfo = btinfo;
2074 it->function = bfun;
2075}
2076
2077/* See btrace.h. */
2078
2079void
2080btrace_call_end (struct btrace_call_iterator *it,
2081 const struct btrace_thread_info *btinfo)
2082{
2083 const struct btrace_function *bfun;
2084
2085 bfun = btinfo->end;
2086 if (bfun == NULL)
2087 error (_("No trace."));
2088
2089 it->btinfo = btinfo;
2090 it->function = NULL;
2091}
2092
/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	  /* NOTE: the decrement relies on unsigned wraparound when
	     steps == 0; the subsequent `stride == steps' comparison then
	     fails and we advance to the NULL end iterator.  */
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
2128
/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* Starting from the end iterator requires materializing the last
     function segment first.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
2177
2178/* See btrace.h. */
2179
int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  /* Compare two call iterators by their function segment numbers.
     Negative, zero, or positive — like strcmp.  */
  unsigned int lnum = btrace_call_number (lhs);
  unsigned int rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}
2191
2192/* See btrace.h. */
2193
2194int
2195btrace_find_call_by_number (struct btrace_call_iterator *it,
2196 const struct btrace_thread_info *btinfo,
2197 unsigned int number)
2198{
2199 const struct btrace_function *bfun;
2200
2201 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2202 {
2203 unsigned int bnum;
2204
2205 bnum = bfun->number;
2206 if (number == bnum)
2207 {
2208 it->btinfo = btinfo;
2209 it->function = bfun;
2210 return 1;
2211 }
2212
2213 /* Functions are ordered and numbered consecutively. We could bail out
2214 earlier. On the other hand, it is very unlikely that we search for
2215 a nonexistent function. */
2216 }
2217
2218 return 0;
2219}
2220
2221/* See btrace.h. */
2222
2223void
2224btrace_set_insn_history (struct btrace_thread_info *btinfo,
2225 const struct btrace_insn_iterator *begin,
2226 const struct btrace_insn_iterator *end)
2227{
2228 if (btinfo->insn_history == NULL)
8d749320 2229 btinfo->insn_history = XCNEW (struct btrace_insn_history);
23a7fe75
MM
2230
2231 btinfo->insn_history->begin = *begin;
2232 btinfo->insn_history->end = *end;
2233}
2234
2235/* See btrace.h. */
2236
2237void
2238btrace_set_call_history (struct btrace_thread_info *btinfo,
2239 const struct btrace_call_iterator *begin,
2240 const struct btrace_call_iterator *end)
2241{
2242 gdb_assert (begin->btinfo == end->btinfo);
2243
2244 if (btinfo->call_history == NULL)
8d749320 2245 btinfo->call_history = XCNEW (struct btrace_call_history);
23a7fe75
MM
2246
2247 btinfo->call_history->begin = *begin;
2248 btinfo->call_history->end = *end;
2249}
07bbe694
MM
2250
2251/* See btrace.h. */
2252
2253int
2254btrace_is_replaying (struct thread_info *tp)
2255{
2256 return tp->btrace.replay != NULL;
2257}
6e07b1d2
MM
2258
2259/* See btrace.h. */
2260
2261int
2262btrace_is_empty (struct thread_info *tp)
2263{
2264 struct btrace_insn_iterator begin, end;
2265 struct btrace_thread_info *btinfo;
2266
2267 btinfo = &tp->btrace;
2268
2269 if (btinfo->begin == NULL)
2270 return 1;
2271
2272 btrace_insn_begin (&begin, btinfo);
2273 btrace_insn_end (&end, btinfo);
2274
2275 return btrace_insn_cmp (&begin, &end) == 0;
2276}
734b0e4b
MM
2277
2278/* Forward the cleanup request. */
2279
static void
do_btrace_data_cleanup (void *arg)
{
  struct btrace_data *data = (struct btrace_data *) arg;

  /* Finalize the btrace data on behalf of the cleanup machinery.  */
  btrace_data_fini (data);
}
2285
2286/* See btrace.h. */
2287
2288struct cleanup *
2289make_cleanup_btrace_data (struct btrace_data *data)
2290{
2291 return make_cleanup (do_btrace_data_cleanup, data);
2292}
b0627500
MM
2293
2294#if defined (HAVE_LIBIPT)
2295
2296/* Print a single packet. */
2297
static void
pt_print_packet (const struct pt_packet *packet)
{
  /* Pretty-print one libipt packet to gdb's standard output.  The output
     format loosely follows the Intel PT packet mnemonics.  */
  switch (packet->type)
    {
    default:
      /* Unknown packet type - print the raw type value.  */
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    /* The IP packets print the compression mode (ipc) and the address.  */
    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    /* Taken/not-taken packets print the bit count and the raw payload.  */
    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
			 packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      /* Mode packets carry a leaf discriminator for their payload.  */
      switch (packet->payload.mode.leaf)
	{
	default:
	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  printf_unfiltered (("mode.exec%s%s"),
			     packet->payload.mode.bits.exec.csl
			     ? (" cs.l") : (""),
			     packet->payload.mode.bits.exec.csd
			     ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  printf_unfiltered (("mode.tsx%s%s"),
			     packet->payload.mode.bits.tsx.intx
			     ? (" intx") : (""),
			     packet->payload.mode.bits.tsx.abrt
			     ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
			 packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
2423
2424/* Decode packets into MAINT using DECODER. */
2425
static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  /* Outer loop: synchronize onto the packet stream at the next PSB, then
     decode packets until the decoder reports an error or end-of-stream.  */
  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      /* Inner loop: read consecutive packets until pt_pkt_next fails.  */
      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  /* Optionally skip PAD packets to de-clutter the history.  */
	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
			     &packet);
	    }
	}

      /* End-of-stream terminates decoding normally.  */
      if (errcode == -pte_eos)
	break;

      /* Record the decode error as a packet so it shows up in the packet
	 history.  PACKET.offset was set by the inner loop above.  */
      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
		     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  /* ERRCODE here is the result of the last pt_pkt_sync_forward.  */
  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
2472
2473/* Update the packet history in BTINFO. */
2474
2475static void
2476btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2477{
2478 volatile struct gdb_exception except;
2479 struct pt_packet_decoder *decoder;
2480 struct btrace_data_pt *pt;
2481 struct pt_config config;
2482 int errcode;
2483
2484 pt = &btinfo->data.variant.pt;
2485
2486 /* Nothing to do if there is no trace. */
2487 if (pt->size == 0)
2488 return;
2489
2490 memset (&config, 0, sizeof(config));
2491
2492 config.size = sizeof (config);
2493 config.begin = pt->data;
2494 config.end = pt->data + pt->size;
2495
2496 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2497 config.cpu.family = pt->config.cpu.family;
2498 config.cpu.model = pt->config.cpu.model;
2499 config.cpu.stepping = pt->config.cpu.stepping;
2500
2501 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2502 if (errcode < 0)
bc504a31 2503 error (_("Failed to configure the Intel Processor Trace decoder: %s."),
b0627500
MM
2504 pt_errstr (pt_errcode (errcode)));
2505
2506 decoder = pt_pkt_alloc_decoder (&config);
2507 if (decoder == NULL)
bc504a31 2508 error (_("Failed to allocate the Intel Processor Trace decoder."));
b0627500
MM
2509
2510 TRY
2511 {
2512 btrace_maint_decode_pt (&btinfo->maint, decoder);
2513 }
2514 CATCH (except, RETURN_MASK_ALL)
2515 {
2516 pt_pkt_free_decoder (decoder);
2517
2518 if (except.reason < 0)
2519 throw_exception (except);
2520 }
2521 END_CATCH
2522
2523 pt_pkt_free_decoder (decoder);
2524}
2525
#endif /* defined (HAVE_LIBIPT) */
2527
2528/* Update the packet maintenance information for BTINFO and store the
2529 low and high bounds into BEGIN and END, respectively.
2530 Store the current iterator state into FROM and TO. */
2531
static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  /* Dispatch on the trace format; unknown formats yield an empty range.  */
  switch (btinfo->data.format)
    {
    default:
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      /* The PT packet list is decoded lazily on first use.  */
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2567
2568/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2569 update the current iterator position. */
2570
static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  /* Print the packets and remember the printed range as the new iterator
     position for the next "maintenance btrace packet-history" call.  */
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      {
	VEC (btrace_block_s) *blocks;
	unsigned int blk;

	/* BTS packets are branch blocks: print begin/end addresses.  */
	blocks = btinfo->data.variant.bts.blocks;
	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block_s *block;

	    block = VEC_index (btrace_block_s, blocks, blk);

	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
			       core_addr_to_string_nz (block->begin),
			       core_addr_to_string_nz (block->end));
	  }

	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	VEC (btrace_pt_packet_s) *packets;
	unsigned int pkt;

	/* PT packets carry their raw trace offset and a decode status.  */
	packets = btinfo->maint.variant.pt.packets;
	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet *packet;

	    packet = VEC_index (btrace_pt_packet_s, packets, pkt);

	    printf_unfiltered ("%u\t", pkt);
	    printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

	    if (packet->errcode == pte_ok)
	      pt_print_packet (&packet->packet);
	    else
	      printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

	    printf_unfiltered ("\n");
	  }

	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2633
2634/* Read a number from an argument string. */
2635
static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  begin = *arg;
  pos = skip_spaces (begin);

  /* Cast to unsigned char: passing a plain char that may be negative to
     isdigit is undefined behavior (CERT STR37-C).  */
  if (!isdigit ((unsigned char) *pos))
    error (_("Expected positive number, got: %s."), pos);

  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX)
    error (_("Number too big."));

  /* Advance the caller's pointer past the consumed spaces and digits.  */
  *arg += (end - begin);

  return (unsigned int) number;
}
2656
2657/* Read a context size from an argument string. */
2658
static int
get_context_size (char **arg)
{
  char *pos;

  pos = skip_spaces (*arg);

  /* Cast to unsigned char: passing a plain char that may be negative to
     isdigit is undefined behavior (CERT STR37-C).  The unused local
     "int number" was removed.  */
  if (!isdigit ((unsigned char) *pos))
    error (_("Expected positive number, got: %s."), pos);

  /* strtol updates *ARG to point past the digits for the caller.  */
  return strtol (pos, arg, 10);
}
2672
2673/* Complain about junk at the end of an argument string. */
2674
static void
no_chunk (char *arg)
{
  /* Anything left in ARG at this point is unexpected trailing input.  */
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
2681
2682/* The "maintenance btrace packet-history" command. */
2683
static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Default context: ten packets per invocation.  */
  size = 10;
  btinfo = &tp->btrace;

  /* BEGIN/END bound the available packets; FROM/TO is the previously
     printed range.  */
  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      /* No argument or "+": continue forward from the last print,
	 clamped to the end of the available range.  */
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      /* "-": print the packets preceding the last print, clamped to the
	 beginning of the available range.  */
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      /* Explicit arguments: "FROM", "FROM,TO", "FROM,+N", or "FROM,-N".  */
      from = get_uint (&arg);
      if (end <= from)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      /* "FROM,+N": N packets starting at FROM.  NOTE(review):
		 get_context_size returns int; a negative N would wrap
		 when assigned to the unsigned SIZE — the clamp below
		 appears to contain it, but confirm.  */
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      /* "FROM,-N": N packets ending at FROM.  */
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      /* "FROM,TO": an explicit inclusive range.  */
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  /* "FROM" alone: a default-sized context starting at FROM.  */
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}

      /* With explicit arguments, a repeated <RET> should not re-run the
	 same command.  */
      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
2786
2787/* The "maintenance btrace clear-packet-history" command. */
2788
2789static void
2790maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2791{
2792 struct btrace_thread_info *btinfo;
2793 struct thread_info *tp;
2794
2795 if (args != NULL && *args != 0)
2796 error (_("Invalid argument."));
2797
2798 tp = find_thread_ptid (inferior_ptid);
2799 if (tp == NULL)
2800 error (_("No thread."));
2801
2802 btinfo = &tp->btrace;
2803
2804 /* Must clear the maint data before - it depends on BTINFO->DATA. */
2805 btrace_maint_clear (btinfo);
2806 btrace_data_clear (&btinfo->data);
2807}
2808
2809/* The "maintenance btrace clear" command. */
2810
2811static void
2812maint_btrace_clear_cmd (char *args, int from_tty)
2813{
2814 struct btrace_thread_info *btinfo;
2815 struct thread_info *tp;
2816
2817 if (args != NULL && *args != 0)
2818 error (_("Invalid argument."));
2819
2820 tp = find_thread_ptid (inferior_ptid);
2821 if (tp == NULL)
2822 error (_("No thread."));
2823
2824 btrace_clear (tp);
2825}
2826
2827/* The "maintenance btrace" command. */
2828
2829static void
2830maint_btrace_cmd (char *args, int from_tty)
2831{
2832 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2833 gdb_stdout);
2834}
2835
2836/* The "maintenance set btrace" command. */
2837
2838static void
2839maint_btrace_set_cmd (char *args, int from_tty)
2840{
2841 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2842 gdb_stdout);
2843}
2844
2845/* The "maintenance show btrace" command. */
2846
2847static void
2848maint_btrace_show_cmd (char *args, int from_tty)
2849{
2850 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2851 all_commands, gdb_stdout);
2852}
2853
2854/* The "maintenance set btrace pt" command. */
2855
2856static void
2857maint_btrace_pt_set_cmd (char *args, int from_tty)
2858{
2859 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2860 all_commands, gdb_stdout);
2861}
2862
2863/* The "maintenance show btrace pt" command. */
2864
2865static void
2866maint_btrace_pt_show_cmd (char *args, int from_tty)
2867{
2868 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2869 all_commands, gdb_stdout);
2870}
2871
2872/* The "maintenance info btrace" command. */
2873
static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  /* This command takes no arguments.  */
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
		     btrace_format_string (conf->format));

  /* Print format-specific details; unknown formats print only the format
     name above.  */
  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      printf_unfiltered (_("Number of packets: %u.\n"),
			 VEC_length (btrace_block_s,
				     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	/* Report the libipt library version alongside the packet count.  */
	version = pt_library_version ();
	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
			   version.minor, version.build,
			   version.ext != NULL ? version.ext : "");

	btrace_maint_update_pt_packets (btinfo);
	printf_unfiltered (_("Number of packets: %u.\n"),
			   VEC_length (btrace_pt_packet_s,
				       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2927
2928/* The "maint show btrace pt skip-pad" show value function. */
2929
static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  /* VALUE is the already-formatted "on"/"off" text for the setting.  */
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
2937
2938
2939/* Initialize btrace maintenance commands. */
2940
void _initialize_btrace (void);
void
_initialize_btrace (void)
{
  /* Register the btrace maintenance commands and settings.  */

  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  /* Prefix commands: "maintenance btrace", "maintenance set/show btrace",
     and the Intel PT specific "maintenance set/show btrace pt".  */
  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
		  _("Branch tracing maintenance commands."),
		  &maint_btrace_cmdlist, "maintenance btrace ",
		  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
		  &maint_btrace_set_cmdlist, "maintenance set btrace ",
		  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel Processor Trace specific variables."),
		  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
		  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
		  &maint_btrace_show_cmdlist, "maintenance show btrace ",
		  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel Processor Trace specific variables."),
		  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
		  0, &maint_btrace_show_cmdlist);

  /* "maintenance set/show btrace pt skip-pad" toggles PAD packet
     suppression in the packet history.  */
  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."),_("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n\
"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n\
"),
	   &maint_btrace_cmdlist);

}