]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/btrace.c
btrace: do not return out of TRY/CATCH
[thirdparty/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
618f726f 3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
b20a6524 34#include "rsp-low.h"
b0627500
MM
35#include "gdbcmd.h"
36#include "cli/cli-utils.h"
b20a6524
MM
37
38#include <inttypes.h>
b0627500
MM
39#include <ctype.h>
40
41/* Command lists for btrace maintenance commands. */
42static struct cmd_list_element *maint_btrace_cmdlist;
43static struct cmd_list_element *maint_btrace_set_cmdlist;
44static struct cmd_list_element *maint_btrace_show_cmdlist;
45static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
46static struct cmd_list_element *maint_btrace_pt_show_cmdlist;
47
48/* Control whether to skip PAD packets when computing the packet history. */
49static int maint_btrace_pt_skip_pad = 1;
b20a6524
MM
50
51static void btrace_add_pc (struct thread_info *tp);
02d27625
MM
52
53/* Print a record debug message. Use do ... while (0) to avoid ambiguities
54 when used in if statements. */
55
56#define DEBUG(msg, args...) \
57 do \
58 { \
59 if (record_debug != 0) \
60 fprintf_unfiltered (gdb_stdlog, \
61 "[btrace] " msg "\n", ##args); \
62 } \
63 while (0)
64
65#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
66
02d27625
MM
67/* Return the function name of a recorded function segment for printing.
68 This function never returns NULL. */
69
70static const char *
23a7fe75 71ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
72{
73 struct minimal_symbol *msym;
74 struct symbol *sym;
75
76 msym = bfun->msym;
77 sym = bfun->sym;
78
79 if (sym != NULL)
80 return SYMBOL_PRINT_NAME (sym);
81
82 if (msym != NULL)
efd66ac6 83 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
84
85 return "<unknown>";
86}
87
88/* Return the file name of a recorded function segment for printing.
89 This function never returns NULL. */
90
91static const char *
23a7fe75 92ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
93{
94 struct symbol *sym;
95 const char *filename;
96
97 sym = bfun->sym;
98
99 if (sym != NULL)
08be3fe3 100 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
101 else
102 filename = "<unknown>";
103
104 return filename;
105}
106
23a7fe75
MM
107/* Return a string representation of the address of an instruction.
108 This function never returns NULL. */
02d27625 109
23a7fe75
MM
110static const char *
111ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 112{
23a7fe75
MM
113 if (insn == NULL)
114 return "<nil>";
115
116 return core_addr_to_string_nz (insn->pc);
02d27625
MM
117}
118
23a7fe75 119/* Print an ftrace debug status message. */
02d27625
MM
120
121static void
23a7fe75 122ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 123{
23a7fe75
MM
124 const char *fun, *file;
125 unsigned int ibegin, iend;
ce0dfbea 126 int level;
23a7fe75
MM
127
128 fun = ftrace_print_function_name (bfun);
129 file = ftrace_print_filename (bfun);
130 level = bfun->level;
131
23a7fe75
MM
132 ibegin = bfun->insn_offset;
133 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
134
ce0dfbea
MM
135 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
136 prefix, fun, file, level, ibegin, iend);
02d27625
MM
137}
138
23a7fe75
MM
/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  MFUN/FUN are the symbols found for the current
   PC; BFUN carries the symbols of the segment being extended.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
			  const struct minimal_symbol *mfun,
			  const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
	return 1;

      /* Check the location of those functions, as well.  Two functions
	 with the same name but in different files are distinct.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
	return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  /* Same (or equally absent) symbol information: same function.  */
  return 0;
}
184
23a7fe75
MM
/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   Links the new segment into the flow list after PREV and assigns its
   segment number and global instruction offset.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
		     struct minimal_symbol *mfun,
		     struct symbol *fun)
{
  struct btrace_function *bfun;

  /* XCNEW zero-initializes, so all links/flags start out cleared.  */
  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      /* PREV must be the current end of the trace; append after it.  */
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
			   + VEC_length (btrace_insn_s, prev->insn));
      /* Inherit the level; callers adjust it for calls/returns.  */
      bfun->level = prev->level;
    }

  return bfun;
}
221
23a7fe75 222/* Update the UP field of a function segment. */
02d27625 223
23a7fe75
MM
224static void
225ftrace_update_caller (struct btrace_function *bfun,
226 struct btrace_function *caller,
227 enum btrace_function_flag flags)
02d27625 228{
23a7fe75
MM
229 if (bfun->up != NULL)
230 ftrace_debug (bfun, "updating caller");
02d27625 231
23a7fe75
MM
232 bfun->up = caller;
233 bfun->flags = flags;
234
235 ftrace_debug (bfun, "set caller");
236}
237
238/* Fix up the caller for all segments of a function. */
239
240static void
241ftrace_fixup_caller (struct btrace_function *bfun,
242 struct btrace_function *caller,
243 enum btrace_function_flag flags)
244{
245 struct btrace_function *prev, *next;
246
247 ftrace_update_caller (bfun, caller, flags);
248
249 /* Update all function segments belonging to the same function. */
250 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
251 ftrace_update_caller (prev, caller, flags);
252
253 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
254 ftrace_update_caller (next, caller, flags);
255}
256
257/* Add a new function segment for a call.
258 CALLER is the chronologically preceding function segment.
259 MFUN and FUN are the symbol information we have for this function. */
260
261static struct btrace_function *
262ftrace_new_call (struct btrace_function *caller,
263 struct minimal_symbol *mfun,
264 struct symbol *fun)
265{
266 struct btrace_function *bfun;
267
268 bfun = ftrace_new_function (caller, mfun, fun);
269 bfun->up = caller;
31fd9caa 270 bfun->level += 1;
23a7fe75
MM
271
272 ftrace_debug (bfun, "new call");
273
274 return bfun;
275}
276
277/* Add a new function segment for a tail call.
278 CALLER is the chronologically preceding function segment.
279 MFUN and FUN are the symbol information we have for this function. */
280
281static struct btrace_function *
282ftrace_new_tailcall (struct btrace_function *caller,
283 struct minimal_symbol *mfun,
284 struct symbol *fun)
285{
286 struct btrace_function *bfun;
02d27625 287
23a7fe75
MM
288 bfun = ftrace_new_function (caller, mfun, fun);
289 bfun->up = caller;
31fd9caa 290 bfun->level += 1;
23a7fe75 291 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 292
23a7fe75
MM
293 ftrace_debug (bfun, "new tail call");
294
295 return bfun;
296}
297
298/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
299 symbol information. */
300
301static struct btrace_function *
302ftrace_find_caller (struct btrace_function *bfun,
303 struct minimal_symbol *mfun,
304 struct symbol *fun)
305{
306 for (; bfun != NULL; bfun = bfun->up)
307 {
308 /* Skip functions with incompatible symbol information. */
309 if (ftrace_function_switched (bfun, mfun, fun))
310 continue;
311
312 /* This is the function segment we're looking for. */
313 break;
314 }
315
316 return bfun;
317}
318
/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  Returns NULL if none is found.  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
	continue;

      /* NOTE(review): assumes a non-gap segment always has at least one
	 instruction; VEC_last on an empty vector would misbehave —
	 confirm that invariant holds for all callers.  */
      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
	break;
    }

  return bfun;
}
342
/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.
   Tries to locate the matching caller segment; if none is found, levels
   and caller links are repaired heuristically.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
	 function instance.  */
      gdb_assert (caller->segment.next == NULL);

      /* Link the two segments of the same function instance together.  */
      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
	 wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
	{
	  /* There is no call in PREV's back trace.  We assume that the
	     branch trace did not include it.  */

	  /* Let's find the topmost call function - this skips tail calls.  */
	  while (prev->up != NULL)
	    prev = prev->up;

	  /* We maintain levels for a series of returns for which we have
	     not seen the calls.
	     We start at the preceding function's level in case this has
	     already been a return for which we have not seen the call.
	     We start at level 0 otherwise, to handle tail calls correctly.  */
	  bfun->level = min (0, prev->level) - 1;

	  /* Fix up the call stack for PREV.  */
	  ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

	  ftrace_debug (bfun, "new return - no caller");
	}
      else
	{
	  /* There is a call in PREV's back trace to which we should have
	     returned.  Let's remain at this level.  */
	  bfun->level = prev->level;

	  ftrace_debug (bfun, "new return - unknown caller");
	}
    }

  return bfun;
}
417
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  struct btrace_function *bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}
437
31fd9caa
MM
/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty: an empty non-gap segment can simply
     be converted into the gap instead of allocating a new segment.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  /* A non-zero errcode is what marks a segment as a gap elsewhere.  */
  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}
460
23a7fe75
MM
/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
	{
	case BTRACE_INSN_RETURN:
	  {
	    const char *fname;

	    /* On some systems, _dl_runtime_resolve returns to the resolved
	       function instead of jumping to it.  From our perspective,
	       however, this is a tailcall.
	       If we treated it as return, we wouldn't be able to find the
	       resolved function in our stack back trace.  Hence, we would
	       lose the current stack back trace and start anew with an empty
	       back trace.  When the resolved function returns, we would then
	       create a stack back trace with the same function names but
	       different frame id's.  This will confuse stepping.  */
	    fname = ftrace_print_function_name (bfun);
	    if (strcmp (fname, "_dl_runtime_resolve") == 0)
	      return ftrace_new_tailcall (bfun, mfun, fun);

	    return ftrace_new_return (bfun, mfun, fun);
	  }

	case BTRACE_INSN_CALL:
	  /* Ignore calls to the next instruction.  They are used for PIC.  */
	  if (last->pc + last->size == pc)
	    break;

	  return ftrace_new_call (bfun, mfun, fun);

	case BTRACE_INSN_JUMP:
	  {
	    CORE_ADDR start;

	    start = get_pc_function_start (pc);

	    /* If we can't determine the function for PC, we treat a jump at
	       the end of the block as tail call.  */
	    if (start == 0 || start == pc)
	      return ftrace_new_tailcall (bfun, mfun, fun);
	  }
	}
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
		    ftrace_print_insn_addr (last),
		    ftrace_print_function_name (bfun),
		    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  /* Same function, no control transfer: keep extending BFUN.  */
  return bfun;
}
552
23a7fe75
MM
/* Add INSN to BFUN's instructions.  (The previous comment spoke of "the
   instruction at PC"; the function actually appends the already-built
   INSN record.)  */

static void
ftrace_update_insns (struct btrace_function *bfun,
		     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  /* Only log per-instruction updates at a higher debug verbosity.  */
  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}
564
7d5c24b3
MM
/* Classify the instruction at PC as call, return, jump, or other.
   Decode errors are deliberately swallowed: if we cannot read or decode
   the instruction, it is classified as BTRACE_INSN_OTHER.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
	iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
	iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
	iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* Intentionally empty: fall back to BTRACE_INSN_OTHER.  */
    }
  END_CATCH

  return iclass;
}
589
/* Compute the function branch trace from BTS trace.  Appends to any
   trace already recorded in TP's btrace info.  BTS delivers only block
   begin/end addresses; the instructions in between are re-disassembled
   here.  Blocks are stored most-recent-first, so we iterate BLK
   backwards to process them in chronological order.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
			   const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  /* BTINFO->LEVEL is the negated minimum level; INT_MAX lets the first
     min() below pick up the real minimum when starting fresh.  */
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
	{
	  struct btrace_insn insn;
	  int size;

	  /* We should hit the end of the block.  Warn if we went too far.  */
	  if (block->end < pc)
	    {
	      /* Indicate the gap in the trace - unless we're at the
		 beginning.  */
	      if (begin != NULL)
		{
		  warning (_("Recorded trace may be corrupted around %s."),
			   core_addr_to_string_nz (pc));

		  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
		  ngaps += 1;
		}
	      break;
	    }

	  end = ftrace_update_function (end, pc);
	  if (begin == NULL)
	    begin = end;

	  /* Maintain the function level offset.
	     For all but the last block, we do it here.  */
	  if (blk != 0)
	    level = min (level, end->level);

	  size = 0;
	  TRY
	    {
	      size = gdb_insn_length (gdbarch, pc);
	    }
	  CATCH (error, RETURN_MASK_ERROR)
	    {
	      /* Leave SIZE at zero; handled below.  */
	    }
	  END_CATCH

	  insn.pc = pc;
	  insn.size = size;
	  insn.iclass = ftrace_classify_insn (gdbarch, pc);
	  insn.flags = 0;

	  ftrace_update_insns (end, &insn);

	  /* We're done once we pushed the instruction at the end.  */
	  if (block->end == pc)
	    break;

	  /* We can't continue if we fail to compute the size.  */
	  if (size <= 0)
	    {
	      warning (_("Recorded trace may be incomplete around %s."),
		       core_addr_to_string_nz (pc));

	      /* Indicate the gap in the trace.  We just added INSN so we're
		 not at the beginning.  */
	      end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
	      ngaps += 1;

	      break;
	    }

	  pc += size;

	  /* Maintain the function level offset.
	     For the last block, we do it here to not consider the last
	     instruction.
	     Since the last instruction corresponds to the current instruction
	     and is not really part of the execution history, it shouldn't
	     affect the level.  */
	  if (blk == 0)
	    level = min (level, end->level);
	}
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
707
b20a6524
MM
708#if defined (HAVE_LIBIPT)
709
710static enum btrace_insn_class
711pt_reclassify_insn (enum pt_insn_class iclass)
712{
713 switch (iclass)
714 {
715 case ptic_call:
716 return BTRACE_INSN_CALL;
717
718 case ptic_return:
719 return BTRACE_INSN_RETURN;
720
721 case ptic_jump:
722 return BTRACE_INSN_JUMP;
723
724 default:
725 return BTRACE_INSN_OTHER;
726 }
727}
728
da8c46d2
MM
/* Return the btrace instruction flags for INSN.  Currently only the
   speculative-execution bit is translated from the libipt record.  */

static enum btrace_insn_flag
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  enum btrace_insn_flag flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}
741
b20a6524
MM
/* Add function branch trace using DECODER.  PBEGIN/PEND/PLEVEL/NGAPS are
   in/out: the segment list, level and gap count are updated in place so
   the caller sees partial progress even if decoding stops early.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
	       struct btrace_function **pbegin,
	       struct btrace_function **pend, int *plevel,
	       unsigned int *ngaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode, nerrors;

  begin = *pbegin;
  end = *pend;
  nerrors = 0;
  /* Outer loop: one iteration per synchronization point in the PT
     stream.  */
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
	{
	  if (errcode != -pte_eos)
	    warning (_("Failed to synchronize onto the Intel(R) Processor "
		       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
	  break;
	}

      memset (&btinsn, 0, sizeof (btinsn));
      /* Inner loop: decode instructions until the next error or end of
	 stream.  */
      for (;;)
	{
	  errcode = pt_insn_next (decoder, &insn, sizeof(insn));
	  if (errcode < 0)
	    break;

	  /* Look for gaps in the trace - unless we're at the beginning.  */
	  if (begin != NULL)
	    {
	      /* Tracing is disabled and re-enabled each time we enter the
		 kernel.  Most times, we continue from the same instruction we
		 stopped before.  This is indicated via the RESUMED instruction
		 flag.  The ENABLED instruction flag means that we continued
		 from some other instruction.  Indicate this as a trace gap.  */
	      if (insn.enabled)
		*pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

	      /* Indicate trace overflows.  */
	      if (insn.resynced)
		*pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
	    }

	  upd = ftrace_update_function (end, insn.ip);
	  if (upd != end)
	    {
	      *pend = end = upd;

	      if (begin == NULL)
		*pbegin = begin = upd;
	    }

	  /* Maintain the function level offset.  */
	  *plevel = min (*plevel, end->level);

	  btinsn.pc = (CORE_ADDR) insn.ip;
	  btinsn.size = (gdb_byte) insn.size;
	  btinsn.iclass = pt_reclassify_insn (insn.iclass);
	  btinsn.flags = pt_btrace_insn_flags (&insn);

	  ftrace_update_insns (end, &btinsn);
	}

      if (errcode == -pte_eos)
	break;

      /* If the gap is at the very beginning, we ignore it - we will have
	 less trace, but we won't have any holes in the trace.  */
      if (begin == NULL)
	continue;

      pt_insn_get_offset (decoder, &offset);

      warning (_("Failed to decode Intel(R) Processor Trace near trace "
		 "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
	       offset, insn.ip, pt_errstr (pt_errcode (errcode)));

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      *ngaps += 1;
    }

  /* NOTE(review): NERRORS is initialized to zero and never incremented
     anywhere in this function, so this warning appears unreachable —
     confirm against upstream history before removing.  */
  if (nerrors > 0)
    warning (_("The recorded execution trace may have gaps."));
}
836
/* A callback function to allow the trace decoder to read the inferior's
   memory.  Returns the number of bytes read (SIZE cast to int) on
   success, or -pte_nomap if the read failed or threw.  ASID and CONTEXT
   are required by the libipt callback signature but unused here.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
			    const struct pt_asid *asid, uint64_t pc,
			    void *context)
{
  int result, errcode;

  result = (int) size;
  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
	result = -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* Report any GDB-level read error as an unmapped address to
	 libipt rather than propagating the exception.  */
      result = -pte_nomap;
    }
  END_CATCH

  return result;
}
862
863/* Translate the vendor from one enum to another. */
864
865static enum pt_cpu_vendor
866pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
867{
868 switch (vendor)
869 {
870 default:
871 return pcv_unknown;
872
873 case CV_INTEL:
874 return pcv_intel;
875 }
876}
877
/* Finalize the function branch trace after decode.  Frees DECODER,
   normalizes TP's level offset, and appends an entry for the current
   PC.  Called on both the success and the error path of
   btrace_compute_ftrace_pt.  */

static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
				       struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}
896
/* Compute the function branch trace from Intel(R) Processor Trace.
   Configures a libipt decoder for BTRACE's raw buffer and decodes it
   into TP's btrace function segments.  On an exception during decode,
   the decoder is still freed and the partial trace finalized before the
   exception is re-thrown; a user quit additionally records a gap.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  /* See btrace_compute_ftrace_bts for the INT_MAX seeding of LEVEL.  */
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init(&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
	   pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel(R) Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image(decoder);
      if (image == NULL)
	error (_("Failed to configure the Intel(R) Processor Trace decoder."));

      errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
      if (errcode < 0)
	error (_("Failed to configure the Intel(R) Processor Trace decoder: "
		 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
		     &btinfo->ngaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
	{
	  btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
	  btinfo->ngaps++;
	}

      /* Free the decoder and finalize even on the error path.  */
      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}
965
966#else /* defined (HAVE_LIBIPT) */
967
/* Stub used when GDB is built without libipt: PT trace can never be
   delivered by the target in that configuration, so reaching this
   function indicates an internal inconsistency.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}
974
975#endif /* defined (HAVE_LIBIPT) */
976
734b0e4b
MM
977/* Compute the function branch trace from a block branch trace BTRACE for
978 a thread given by BTINFO. */
979
980static void
76235df1 981btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
734b0e4b
MM
982{
983 DEBUG ("compute ftrace");
984
985 switch (btrace->format)
986 {
987 case BTRACE_FORMAT_NONE:
988 return;
989
990 case BTRACE_FORMAT_BTS:
76235df1 991 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
734b0e4b 992 return;
b20a6524
MM
993
994 case BTRACE_FORMAT_PT:
995 btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
996 return;
734b0e4b
MM
997 }
998
999 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1000}
1001
6e07b1d2
MM
/* Add an entry for the current PC.  Builds a one-instruction BTS trace
   consisting of a single block [pc, pc] and feeds it through the normal
   ftrace computation, so the current PC shows up as the latest entry.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  /* Ensure the temporary trace data is freed even if
     btrace_compute_ftrace throws.  */
  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}
1030
02d27625
MM
/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  /* Already enabled: nothing to do.  */
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  /* Refuse PT up front when GDB was built without the decoder library.  */
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel(R) Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}
1056
1057/* See btrace.h. */
1058
f4abbc16
MM
1059const struct btrace_config *
1060btrace_conf (const struct btrace_thread_info *btinfo)
1061{
1062 if (btinfo->target == NULL)
1063 return NULL;
1064
1065 return target_btrace_conf (btinfo->target);
1066}
1067
1068/* See btrace.h. */
1069
02d27625
MM
1070void
1071btrace_disable (struct thread_info *tp)
1072{
1073 struct btrace_thread_info *btp = &tp->btrace;
1074 int errcode = 0;
1075
1076 if (btp->target == NULL)
1077 return;
1078
1079 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1080
1081 target_disable_btrace (btp->target);
1082 btp->target = NULL;
1083
1084 btrace_clear (tp);
1085}
1086
1087/* See btrace.h. */
1088
1089void
1090btrace_teardown (struct thread_info *tp)
1091{
1092 struct btrace_thread_info *btp = &tp->btrace;
1093 int errcode = 0;
1094
1095 if (btp->target == NULL)
1096 return;
1097
1098 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1099
1100 target_teardown_btrace (btp->target);
1101 btp->target = NULL;
1102
1103 btrace_clear (tp);
1104}
1105
/* Stitch branch trace in BTS format.  Join the new delta trace in
   BTRACE onto the existing trace of TP, adjusting the chronologically
   first new block so the traces line up.  Return 0 on success, -1 to
   request a full re-read of the trace.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace. Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
1190
734b0e4b
MM
1191/* Adjust the block trace in order to stitch old and new trace together.
1192 BTRACE is the new delta trace between the last and the current stop.
31fd9caa
MM
1193 TP is the traced thread.
1194 May modifx BTRACE as well as the existing trace in TP.
734b0e4b
MM
1195 Return 0 on success, -1 otherwise. */
1196
1197static int
31fd9caa 1198btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1199{
1200 /* If we don't have trace, there's nothing to do. */
1201 if (btrace_data_empty (btrace))
1202 return 0;
1203
1204 switch (btrace->format)
1205 {
1206 case BTRACE_FORMAT_NONE:
1207 return 0;
1208
1209 case BTRACE_FORMAT_BTS:
31fd9caa 1210 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1211
1212 case BTRACE_FORMAT_PT:
1213 /* Delta reads are not supported. */
1214 return -1;
734b0e4b
MM
1215 }
1216
1217 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1218}
1219
969c39fb
MM
1220/* Clear the branch trace histories in BTINFO. */
1221
1222static void
1223btrace_clear_history (struct btrace_thread_info *btinfo)
1224{
1225 xfree (btinfo->insn_history);
1226 xfree (btinfo->call_history);
1227 xfree (btinfo->replay);
1228
1229 btinfo->insn_history = NULL;
1230 btinfo->call_history = NULL;
1231 btinfo->replay = NULL;
1232}
1233
/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  /* Which maintenance state exists depends on the format of the stored
     raw trace data.  */
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      /* Reset the packet-history iterators.  */
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      /* For PT, also release the cached packet vector.  */
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
1260
/* See btrace.h.

   Fetch new branch trace for TP from the target, preferring a delta
   read stitched onto the existing trace, falling back to a new read
   and finally to a full re-read.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected. */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have. */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success. Let's try to stitch the traces together. */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace. Let's try to read new trace. */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have. */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over. */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error. */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any. */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data. The stored data will be cleared in
	 btrace_clear, so we always append the new trace. */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}
1335
1336/* See btrace.h. */
1337
1338void
1339btrace_clear (struct thread_info *tp)
1340{
1341 struct btrace_thread_info *btinfo;
23a7fe75 1342 struct btrace_function *it, *trash;
02d27625
MM
1343
1344 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1345
0b722aec
MM
1346 /* Make sure btrace frames that may hold a pointer into the branch
1347 trace data are destroyed. */
1348 reinit_frame_cache ();
1349
02d27625
MM
1350 btinfo = &tp->btrace;
1351
23a7fe75
MM
1352 it = btinfo->begin;
1353 while (it != NULL)
1354 {
1355 trash = it;
1356 it = it->flow.next;
02d27625 1357
23a7fe75
MM
1358 xfree (trash);
1359 }
1360
1361 btinfo->begin = NULL;
1362 btinfo->end = NULL;
31fd9caa 1363 btinfo->ngaps = 0;
23a7fe75 1364
b0627500
MM
1365 /* Must clear the maint data before - it depends on BTINFO->DATA. */
1366 btrace_maint_clear (btinfo);
9be54cae 1367 btrace_data_clear (&btinfo->data);
969c39fb 1368 btrace_clear_history (btinfo);
02d27625
MM
1369}
1370
/* See btrace.h. */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* Discard the trace of every live thread.  NOTE(review): presumably
     the computed trace may reference symbols from OBJFILE — confirm
     against the callers of this function.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
c12a2917
MM
1383
1384#if defined (HAVE_LIBEXPAT)
1385
/* Check the btrace document version.  Only version "1.0" is accepted;
   anything else raises an XML parse error.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}
1399
/* Parse a btrace "block" xml record. */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
			const struct gdb_xml_element *element,
			void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  /* "block" records imply the BTS format.  The first block switches the
     data to BTS; a record of any other format is a document error.  */
  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  /* Append a new block holding the parsed begin/end addresses.  */
  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}
1434
/* Parse a "raw" xml record: decode the hex-encoded BODY_TEXT into a
   freshly allocated buffer returned in *PDATA with its size in *PSIZE.
   Ownership of the buffer passes to the caller.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  /* Two hex characters encode one byte, so the length must be even.  */
  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);

  /* The cleanup releases DATA should gdb_xml_error below not return.  */
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h. */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  /* Success - keep the buffer and hand it to the caller.  */
  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}
1474
/* Parse a btrace pt-config "cpu" xml record. */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
				const struct gdb_xml_element *element,
				void *user_data,
				VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  /* Only the Intel vendor string is recognized; any other vendor keeps
     the default set by parse_xml_btrace_pt.  */
  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}
1501
/* Parse a btrace pt "raw" xml record. */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
			 const struct gdb_xml_element *element,
			 void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;

  /* Decode the hex-encoded body into the PT data buffer.  */
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
		 &btrace->variant.pt.size);
}
1515
/* Parse a btrace "pt" xml record. */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
		     const struct gdb_xml_element *element,
		     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  /* Switch the data to the PT format and initialize the variant; child
     records fill in the cpu configuration and the raw data.  */
  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}
1531
/* Attributes of a btrace "block" record.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a btrace pt-config "cpu" record.  */

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of a btrace "pt-config" record.  */

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Children of a btrace "pt" record.  */

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace" element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of the top-level "btrace" element.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The btrace document description, rooted at "btrace".  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
1579
/* See btrace.h. */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* The cleanup discards any partially parsed data if parsing fails.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results. */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace. XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
23a7fe75 1607
#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record. */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
			   const struct gdb_xml_element *element,
			   void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  /* The "size" attribute is optional; the size stays zero if absent.  */
  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}
1628
/* Parse a btrace-conf "pt" xml record. */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
			  const struct gdb_xml_element *element,
			  void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  /* The "size" attribute is optional; the size stays zero if absent.  */
  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}
1647
/* Attributes of a btrace-conf "pt" record.  */

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a btrace-conf "bts" record.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of the top-level "btrace-conf" element.  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level "btrace-conf" element.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* The btrace-conf document description, rooted at "btrace-conf".  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */
1678
/* See btrace.h. */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1699
/* See btrace.h. */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace. */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector. */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}
1722
1723/* See btrace.h. */
1724
1725unsigned int
1726btrace_insn_number (const struct btrace_insn_iterator *it)
1727{
1728 const struct btrace_function *bfun;
1729
1730 bfun = it->function;
31fd9caa
MM
1731
1732 /* Return zero if the iterator points to a gap in the trace. */
1733 if (bfun->errcode != 0)
1734 return 0;
1735
23a7fe75
MM
1736 return bfun->insn_offset + it->index;
1737}
1738
1739/* See btrace.h. */
1740
1741void
1742btrace_insn_begin (struct btrace_insn_iterator *it,
1743 const struct btrace_thread_info *btinfo)
1744{
1745 const struct btrace_function *bfun;
1746
1747 bfun = btinfo->begin;
1748 if (bfun == NULL)
1749 error (_("No trace."));
1750
1751 it->function = bfun;
1752 it->index = 0;
1753}
1754
/* See btrace.h. */

void
btrace_insn_end (struct btrace_insn_iterator *it,
		 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it. */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}
1779
/* See btrace.h.

   Advance IT by up to STRIDE instructions, crossing function-segment
   boundaries as needed; gaps count as one instruction.  Returns the
   number of instructions actually stepped.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace. We count
	 it as one instruction. */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment. */
      space = end - index;

      /* Advance the iterator as far as possible within this segment. */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one. */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function. */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function. */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress. */
      gdb_assert (adv > 0);
    }

  /* Update the iterator. */
  it->function = bfun;
  it->index = index;

  return steps;
}
1861
/* See btrace.h.

   Step IT backwards by up to STRIDE instructions, crossing segment
   boundaries as needed; gaps count as one instruction.  Returns the
   number of instructions actually stepped.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one. */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function. */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* An empty function segment represents a gap in the trace. We count
	     it as one instruction. */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment. */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress. */
      gdb_assert (adv > 0);
    }

  /* Update the iterator. */
  it->function = bfun;
  it->index = index;

  return steps;
}
1919
/* See btrace.h.

   Returns a negative, zero, or positive value as LHS is before, equal
   to, or after RHS.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero. Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps. If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number. Otherwise, a gap might compare equal to an
     instruction. */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
	lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
	rnum -= 1;
    }

  return (int) (lnum - rnum);
}
1964
/* See btrace.h.

   Position IT at instruction NUMBER.  Returns non-zero on success,
   zero if NUMBER is not in the trace.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  /* Search backwards for the segment whose instruction range could
     contain NUMBER.  */
  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps. */
      if (bfun->errcode != 0)
	continue;

      if (bfun->insn_offset <= number)
	break;
    }

  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  /* NUMBER must lie within this segment's instruction range.  */
  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}
2000
2001/* See btrace.h. */
2002
2003const struct btrace_function *
2004btrace_call_get (const struct btrace_call_iterator *it)
2005{
2006 return it->function;
2007}
2008
/* See btrace.h. */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function. */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek. */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function. */
  return bfun->number + 1;
}
2037
2038/* See btrace.h. */
2039
2040void
2041btrace_call_begin (struct btrace_call_iterator *it,
2042 const struct btrace_thread_info *btinfo)
2043{
2044 const struct btrace_function *bfun;
2045
2046 bfun = btinfo->begin;
2047 if (bfun == NULL)
2048 error (_("No trace."));
2049
2050 it->btinfo = btinfo;
2051 it->function = bfun;
2052}
2053
2054/* See btrace.h. */
2055
2056void
2057btrace_call_end (struct btrace_call_iterator *it,
2058 const struct btrace_thread_info *btinfo)
2059{
2060 const struct btrace_function *bfun;
2061
2062 bfun = btinfo->end;
2063 if (bfun == NULL)
2064 error (_("No trace."));
2065
2066 it->btinfo = btinfo;
2067 it->function = NULL;
2068}
2069
/* See btrace.h.

   Advance IT by up to STRIDE function segments.  Returns the number of
   segments actually stepped.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction. */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
2105
/* See btrace.h.

   Step IT backwards by up to STRIDE function segments.  Returns the
   number of segments actually stepped.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* Stepping back from the end iterator first lands on the last real
     function segment.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction. */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}
2154
/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  /* Compare by call number; the difference is negative, zero, or
     positive as LHS is before, equal to, or after RHS.  */
  return (int) (btrace_call_number (lhs) - btrace_call_number (rhs));
}
2168
2169/* See btrace.h. */
2170
2171int
2172btrace_find_call_by_number (struct btrace_call_iterator *it,
2173 const struct btrace_thread_info *btinfo,
2174 unsigned int number)
2175{
2176 const struct btrace_function *bfun;
2177
2178 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2179 {
2180 unsigned int bnum;
2181
2182 bnum = bfun->number;
2183 if (number == bnum)
2184 {
2185 it->btinfo = btinfo;
2186 it->function = bfun;
2187 return 1;
2188 }
2189
2190 /* Functions are ordered and numbered consecutively. We could bail out
2191 earlier. On the other hand, it is very unlikely that we search for
2192 a nonexistent function. */
2193 }
2194
2195 return 0;
2196}
2197
2198/* See btrace.h. */
2199
2200void
2201btrace_set_insn_history (struct btrace_thread_info *btinfo,
2202 const struct btrace_insn_iterator *begin,
2203 const struct btrace_insn_iterator *end)
2204{
2205 if (btinfo->insn_history == NULL)
8d749320 2206 btinfo->insn_history = XCNEW (struct btrace_insn_history);
23a7fe75
MM
2207
2208 btinfo->insn_history->begin = *begin;
2209 btinfo->insn_history->end = *end;
2210}
2211
2212/* See btrace.h. */
2213
2214void
2215btrace_set_call_history (struct btrace_thread_info *btinfo,
2216 const struct btrace_call_iterator *begin,
2217 const struct btrace_call_iterator *end)
2218{
2219 gdb_assert (begin->btinfo == end->btinfo);
2220
2221 if (btinfo->call_history == NULL)
8d749320 2222 btinfo->call_history = XCNEW (struct btrace_call_history);
23a7fe75
MM
2223
2224 btinfo->call_history->begin = *begin;
2225 btinfo->call_history->end = *end;
2226}
07bbe694
MM
2227
2228/* See btrace.h. */
2229
2230int
2231btrace_is_replaying (struct thread_info *tp)
2232{
2233 return tp->btrace.replay != NULL;
2234}
6e07b1d2
MM
2235
2236/* See btrace.h. */
2237
2238int
2239btrace_is_empty (struct thread_info *tp)
2240{
2241 struct btrace_insn_iterator begin, end;
2242 struct btrace_thread_info *btinfo;
2243
2244 btinfo = &tp->btrace;
2245
2246 if (btinfo->begin == NULL)
2247 return 1;
2248
2249 btrace_insn_begin (&begin, btinfo);
2250 btrace_insn_end (&end, btinfo);
2251
2252 return btrace_insn_cmp (&begin, &end) == 0;
2253}
734b0e4b
MM
2254
2255/* Forward the cleanup request. */
2256
static void
do_btrace_data_cleanup (void *arg)
{
  struct btrace_data *data = (struct btrace_data *) arg;

  /* Release the trace data on behalf of the cleanup machinery.  */
  btrace_data_fini (data);
}
2262
2263/* See btrace.h. */
2264
2265struct cleanup *
2266make_cleanup_btrace_data (struct btrace_data *data)
2267{
2268 return make_cleanup (do_btrace_data_cleanup, data);
2269}
b0627500
MM
2270
2271#if defined (HAVE_LIBIPT)
2272
2273/* Print a single packet. */
2274
static void
pt_print_packet (const struct pt_packet *packet)
{
  /* Print a one-line textual rendering of PACKET to gdb's stdout.
     Packet types and payload layout come from libipt's <intel-pt.h>.  */
  switch (packet->type)
    {
    default:
      /* Unknown packet type - print the raw type number.  */
      printf_unfiltered (("[??: %x]"), packet->type);
      break;

    case ppt_psb:
      printf_unfiltered (("psb"));
      break;

    case ppt_psbend:
      printf_unfiltered (("psbend"));
      break;

    case ppt_pad:
      printf_unfiltered (("pad"));
      break;

    /* The IP packets print the compression mode and the (partial)
       instruction pointer payload.  */
    case ppt_tip:
      printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pge:
      printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_tip_pgd:
      printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    case ppt_fup:
      printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
			 packet->payload.ip.ipc,
			 packet->payload.ip.ip);
      break;

    /* Taken/not-taken packets: bit count plus the raw payload bits.  */
    case ppt_tnt_8:
      printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_tnt_64:
      printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
			 packet->payload.tnt.bit_size,
			 packet->payload.tnt.payload);
      break;

    case ppt_pip:
      printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
			 packet->payload.pip.nr ? (" nr") : (""));
      break;

    case ppt_tsc:
      printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
      break;

    case ppt_cbr:
      printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
      break;

    case ppt_mode:
      /* Mode packets carry a sub-leaf selecting the payload variant.  */
      switch (packet->payload.mode.leaf)
	{
	default:
	  printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
	  break;

	case pt_mol_exec:
	  printf_unfiltered (("mode.exec%s%s"),
			     packet->payload.mode.bits.exec.csl
			     ? (" cs.l") : (""),
			     packet->payload.mode.bits.exec.csd
			     ? (" cs.d") : (""));
	  break;

	case pt_mol_tsx:
	  printf_unfiltered (("mode.tsx%s%s"),
			     packet->payload.mode.bits.tsx.intx
			     ? (" intx") : (""),
			     packet->payload.mode.bits.tsx.abrt
			     ? (" abrt") : (""));
	  break;
	}
      break;

    case ppt_ovf:
      printf_unfiltered (("ovf"));
      break;

    case ppt_stop:
      printf_unfiltered (("stop"));
      break;

    case ppt_vmcs:
      printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
      break;

    case ppt_tma:
      printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
			 packet->payload.tma.fc);
      break;

    case ppt_mtc:
      printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
      break;

    case ppt_cyc:
      printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
      break;

    case ppt_mnt:
      printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
      break;
    }
}
2400
2401/* Decode packets into MAINT using DECODER. */
2402
static void
btrace_maint_decode_pt (struct btrace_maint_info *maint,
			struct pt_packet_decoder *decoder)
{
  int errcode;

  /* Outer loop: synchronize onto the next PSB in the trace stream.
     Inner loop: decode packets from there until decoding fails or the
     stream ends, collecting each packet into MAINT's packet vector.  */
  for (;;)
    {
      struct btrace_pt_packet packet;

      errcode = pt_pkt_sync_forward (decoder);
      if (errcode < 0)
	break;

      for (;;)
	{
	  pt_pkt_get_offset (decoder, &packet.offset);

	  errcode = pt_pkt_next (decoder, &packet.packet,
				 sizeof(packet.packet));
	  if (errcode < 0)
	    break;

	  /* Optionally drop PAD packets; controlled by the
	     "maint set btrace pt skip-pad" setting.  */
	  if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
	    {
	      packet.errcode = pt_errcode (errcode);
	      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
			     &packet);
	    }
	}

      /* A clean end-of-stream terminates decoding altogether.  */
      if (errcode == -pte_eos)
	break;

      /* Decoding failed mid-stream.  Record an error packet
	 (PACKET.offset still holds the failing offset), warn, and try
	 to re-synchronize in the next outer iteration.  */
      packet.errcode = pt_errcode (errcode);
      VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
		     &packet);

      warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
	       packet.offset, pt_errstr (packet.errcode));
    }

  /* ERRCODE here is the last synchronization result; anything other
     than end-of-stream means we could not sync at all.  */
  if (errcode != -pte_eos)
    warning (_("Failed to synchronize onto the Intel(R) Processor Trace "
	       "stream: %s."), pt_errstr (pt_errcode (errcode)));
}
2449
2450/* Update the packet history in BTINFO. */
2451
2452static void
2453btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2454{
2455 volatile struct gdb_exception except;
2456 struct pt_packet_decoder *decoder;
2457 struct btrace_data_pt *pt;
2458 struct pt_config config;
2459 int errcode;
2460
2461 pt = &btinfo->data.variant.pt;
2462
2463 /* Nothing to do if there is no trace. */
2464 if (pt->size == 0)
2465 return;
2466
2467 memset (&config, 0, sizeof(config));
2468
2469 config.size = sizeof (config);
2470 config.begin = pt->data;
2471 config.end = pt->data + pt->size;
2472
2473 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2474 config.cpu.family = pt->config.cpu.family;
2475 config.cpu.model = pt->config.cpu.model;
2476 config.cpu.stepping = pt->config.cpu.stepping;
2477
2478 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2479 if (errcode < 0)
2480 error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
2481 pt_errstr (pt_errcode (errcode)));
2482
2483 decoder = pt_pkt_alloc_decoder (&config);
2484 if (decoder == NULL)
2485 error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
2486
2487 TRY
2488 {
2489 btrace_maint_decode_pt (&btinfo->maint, decoder);
2490 }
2491 CATCH (except, RETURN_MASK_ALL)
2492 {
2493 pt_pkt_free_decoder (decoder);
2494
2495 if (except.reason < 0)
2496 throw_exception (except);
2497 }
2498 END_CATCH
2499
2500 pt_pkt_free_decoder (decoder);
2501}
2502
2503#endif /* !defined (HAVE_LIBIPT) */
2504
2505/* Update the packet maintenance information for BTINFO and store the
2506 low and high bounds into BEGIN and END, respectively.
2507 Store the current iterator state into FROM and TO. */
2508
static void
btrace_maint_update_packets (struct btrace_thread_info *btinfo,
			     unsigned int *begin, unsigned int *end,
			     unsigned int *from, unsigned int *to)
{
  switch (btinfo->data.format)
    {
    default:
      /* No packet data for this trace format - report an empty range.  */
      *begin = 0;
      *end = 0;
      *from = 0;
      *to = 0;
      break;

    case BTRACE_FORMAT_BTS:
      /* Nothing to do - we operate directly on BTINFO->DATA.  */
      *begin = 0;
      *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
      *from = btinfo->maint.variant.bts.packet_history.begin;
      *to = btinfo->maint.variant.bts.packet_history.end;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      /* Decode the raw PT buffer lazily on first use.  */
      if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
	btrace_maint_update_pt_packets (btinfo);

      *begin = 0;
      *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
      *from = btinfo->maint.variant.pt.packet_history.begin;
      *to = btinfo->maint.variant.pt.packet_history.end;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2544
2545/* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2546 update the current iterator position. */
2547
static void
btrace_maint_print_packets (struct btrace_thread_info *btinfo,
			    unsigned int begin, unsigned int end)
{
  switch (btinfo->data.format)
    {
    default:
      /* Nothing to print for this trace format.  */
      break;

    case BTRACE_FORMAT_BTS:
      {
	VEC (btrace_block_s) *blocks;
	unsigned int blk;

	/* BTS packets are begin/end address blocks; print one per line
	   and remember the printed range for the next invocation.  */
	blocks = btinfo->data.variant.bts.blocks;
	for (blk = begin; blk < end; ++blk)
	  {
	    const btrace_block_s *block;

	    block = VEC_index (btrace_block_s, blocks, blk);

	    printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
			       core_addr_to_string_nz (block->begin),
			       core_addr_to_string_nz (block->end));
	  }

	btinfo->maint.variant.bts.packet_history.begin = begin;
	btinfo->maint.variant.bts.packet_history.end = end;
      }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	VEC (btrace_pt_packet_s) *packets;
	unsigned int pkt;

	/* Print index and trace offset, then either the decoded packet
	   or the decode error recorded for that offset.  */
	packets = btinfo->maint.variant.pt.packets;
	for (pkt = begin; pkt < end; ++pkt)
	  {
	    const struct btrace_pt_packet *packet;

	    packet = VEC_index (btrace_pt_packet_s, packets, pkt);

	    printf_unfiltered ("%u\t", pkt);
	    printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);

	    if (packet->errcode == pte_ok)
	      pt_print_packet (&packet->packet);
	    else
	      printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));

	    printf_unfiltered ("\n");
	  }

	btinfo->maint.variant.pt.packet_history.begin = begin;
	btinfo->maint.variant.pt.packet_history.end = end;
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2610
2611/* Read a number from an argument string. */
2612
static unsigned int
get_uint (char **arg)
{
  char *begin, *end, *pos;
  unsigned long number;

  /* Parse a decimal unsigned integer at *ARG, advancing *ARG past the
     digits.  Errors out on non-numeric input or out-of-range values.  */
  begin = *arg;
  pos = skip_spaces (begin);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  /* On platforms where unsigned long and unsigned int have the same
     width, an overflowing strtoul clamps to ULONG_MAX == UINT_MAX and
     would pass a plain "> UINT_MAX" check; detect that via ERANGE.  */
  errno = 0;
  number = strtoul (pos, &end, 10);
  if (number > UINT_MAX || (number == ULONG_MAX && errno == ERANGE))
    error (_("Number too big."));

  *arg += (end - begin);

  return (unsigned int) number;
}
2633
2634/* Read a context size from an argument string. */
2635
static int
get_context_size (char **arg)
{
  char *pos;

  /* Parse a decimal context size at *ARG, advancing *ARG past the
     digits.  Errors out on non-numeric input.  (The unused local
     "int number" has been removed.)  */
  pos = skip_spaces (*arg);

  if (!isdigit (*pos))
    error (_("Expected positive number, got: %s."), pos);

  return strtol (pos, arg, 10);
}
2649
2650/* Complain about junk at the end of an argument string. */
2651
static void
no_chunk (char *arg)
{
  /* Anything left in ARG at this point is unexpected trailing input.  */
  if (*arg != 0)
    error (_("Junk after argument: %s."), arg);
}
2658
2659/* The "maintenance btrace packet-history" command. */
2660
static void
maint_btrace_packet_history_cmd (char *arg, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int size, begin, end, from, to;

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* SIZE is the default number of packets to print.  */
  size = 10;
  btinfo = &tp->btrace;

  /* [BEGIN; END) is the available packet range; [FROM; TO) was the
     previously printed window.  */
  btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
  if (begin == end)
    {
      printf_unfiltered (_("No trace.\n"));
      return;
    }

  if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
    {
      /* No argument or "+": print the next SIZE packets, clamped to
	 the end of the available range.  */
      from = to;

      if (end - from < size)
	size = end - from;
      to = from + size;
    }
  else if (strcmp (arg, "-") == 0)
    {
      /* "-": print the SIZE packets before the previous window,
	 clamped to the beginning of the available range.  */
      to = from;

      if (to - begin < size)
	size = to - begin;
      from = to - size;
    }
  else
    {
      /* Explicit starting packet, optionally followed by
	 ",<end>", ",+<count>", or ",-<count>".  */
      from = get_uint (&arg);
      if (end <= from)
	error (_("'%u' is out of range."), from);

      arg = skip_spaces (arg);
      if (*arg == ',')
	{
	  arg = skip_spaces (++arg);

	  if (*arg == '+')
	    {
	      /* ",+N": print N packets starting at FROM.  */
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      if (end - from < size)
		size = end - from;
	      to = from + size;
	    }
	  else if (*arg == '-')
	    {
	      /* ",-N": print N packets ending at FROM.  */
	      arg += 1;
	      size = get_context_size (&arg);

	      no_chunk (arg);

	      /* Include the packet given as first argument.  */
	      from += 1;
	      to = from;

	      if (to - begin < size)
		size = to - begin;
	      from = to - size;
	    }
	  else
	    {
	      /* ",N": print packets FROM through N.  */
	      to = get_uint (&arg);

	      /* Include the packet at the second argument and silently
		 truncate the range.  */
	      if (to < end)
		to += 1;
	      else
		to = end;

	      no_chunk (arg);
	    }
	}
      else
	{
	  /* Just a starting packet: print the default SIZE packets
	     from there, clamped to the end of the range.  */
	  no_chunk (arg);

	  if (end - from < size)
	    size = end - from;
	  to = from + size;
	}

      /* An explicit range should not be repeated by an empty line.  */
      dont_repeat ();
    }

  btrace_maint_print_packets (btinfo, from, to);
}
2763
2764/* The "maintenance btrace clear-packet-history" command. */
2765
2766static void
2767maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2768{
2769 struct btrace_thread_info *btinfo;
2770 struct thread_info *tp;
2771
2772 if (args != NULL && *args != 0)
2773 error (_("Invalid argument."));
2774
2775 tp = find_thread_ptid (inferior_ptid);
2776 if (tp == NULL)
2777 error (_("No thread."));
2778
2779 btinfo = &tp->btrace;
2780
2781 /* Must clear the maint data before - it depends on BTINFO->DATA. */
2782 btrace_maint_clear (btinfo);
2783 btrace_data_clear (&btinfo->data);
2784}
2785
2786/* The "maintenance btrace clear" command. */
2787
2788static void
2789maint_btrace_clear_cmd (char *args, int from_tty)
2790{
2791 struct btrace_thread_info *btinfo;
2792 struct thread_info *tp;
2793
2794 if (args != NULL && *args != 0)
2795 error (_("Invalid argument."));
2796
2797 tp = find_thread_ptid (inferior_ptid);
2798 if (tp == NULL)
2799 error (_("No thread."));
2800
2801 btrace_clear (tp);
2802}
2803
2804/* The "maintenance btrace" command. */
2805
2806static void
2807maint_btrace_cmd (char *args, int from_tty)
2808{
2809 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2810 gdb_stdout);
2811}
2812
2813/* The "maintenance set btrace" command. */
2814
2815static void
2816maint_btrace_set_cmd (char *args, int from_tty)
2817{
2818 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2819 gdb_stdout);
2820}
2821
2822/* The "maintenance show btrace" command. */
2823
2824static void
2825maint_btrace_show_cmd (char *args, int from_tty)
2826{
2827 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2828 all_commands, gdb_stdout);
2829}
2830
2831/* The "maintenance set btrace pt" command. */
2832
2833static void
2834maint_btrace_pt_set_cmd (char *args, int from_tty)
2835{
2836 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2837 all_commands, gdb_stdout);
2838}
2839
2840/* The "maintenance show btrace pt" command. */
2841
2842static void
2843maint_btrace_pt_show_cmd (char *args, int from_tty)
2844{
2845 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2846 all_commands, gdb_stdout);
2847}
2848
2849/* The "maintenance info btrace" command. */
2850
static void
maint_info_btrace_cmd (char *args, int from_tty)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  const struct btrace_config *conf;

  /* This command takes no arguments.  */
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf == NULL)
    error (_("No btrace configuration."));

  printf_unfiltered (_("Format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      /* For BTS, the block vector is the packet data.  */
      printf_unfiltered (_("Number of packets: %u.\n"),
			 VEC_length (btrace_block_s,
				     btinfo->data.variant.bts.blocks));
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      {
	struct pt_version version;

	/* Report the libipt version and the decoded packet count;
	   decoding happens lazily here if not done yet.  */
	version = pt_library_version ();
	printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
			   version.minor, version.build,
			   version.ext != NULL ? version.ext : "");

	btrace_maint_update_pt_packets (btinfo);
	printf_unfiltered (_("Number of packets: %u.\n"),
			   VEC_length (btrace_pt_packet_s,
				       btinfo->maint.variant.pt.packets));
      }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}
2904
2905/* The "maint show btrace pt skip-pad" show value function. */
2906
static void
show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c,
			       const char *value)
{
  /* VALUE is the already-formatted setting ("on"/"off").  */
  fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
}
2914
2915
2916/* Initialize btrace maintenance commands. */
2917
void _initialize_btrace (void);
void
_initialize_btrace (void)
{
  /* Register the btrace maintenance commands:
     "maint info btrace", the "maint btrace" prefix and its
     subcommands, and the "maint set/show btrace [pt]" prefixes.  */
  add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
	   _("Info about branch tracing data."), &maintenanceinfolist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
		  _("Branch tracing maintenance commands."),
		  &maint_btrace_cmdlist, "maintenance btrace ",
		  0, &maintenancelist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
Set branch tracing specific variables."),
		  &maint_btrace_set_cmdlist, "maintenance set btrace ",
		  0, &maintenance_set_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
Set Intel(R) Processor Trace specific variables."),
		  &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
		  0, &maint_btrace_set_cmdlist);

  add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
Show branch tracing specific variables."),
		  &maint_btrace_show_cmdlist, "maintenance show btrace ",
		  0, &maintenance_show_cmdlist);

  add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
Show Intel(R) Processor Trace specific variables."),
		  &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
		  0, &maint_btrace_show_cmdlist);

  /* Boolean knob backing MAINT_BTRACE_PT_SKIP_PAD (defaults to 1).  */
  add_setshow_boolean_cmd ("skip-pad", class_maintenance,
			   &maint_btrace_pt_skip_pad, _("\
Set whether PAD packets should be skipped in the btrace packet history."), _("\
Show whether PAD packets should be skipped in the btrace packet history."),_("\
When enabled, PAD packets are ignored in the btrace packet history."),
			   NULL, show_maint_btrace_pt_skip_pad,
			   &maint_btrace_pt_set_cmdlist,
			   &maint_btrace_pt_show_cmdlist);

  add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
	   _("Print the raw branch tracing data.\n\
With no argument, print ten more packets after the previous ten-line print.\n\
With '-' as argument print ten packets before a previous ten-line print.\n\
One argument specifies the starting packet of a ten-line print.\n\
Two arguments with comma between specify starting and ending packets to \
print.\n\
Preceded with '+'/'-' the second argument specifies the distance from the \
first.\n"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear-packet-history", class_maintenance,
	   maint_btrace_clear_packet_history_cmd,
	   _("Clears the branch tracing packet history.\n\
Discards the raw branch tracing data but not the execution history data.\n\
"),
	   &maint_btrace_cmdlist);

  add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
	   _("Clears the branch tracing data.\n\
Discards the raw branch tracing data and the execution history data.\n\
The next 'record' command will fetch the branch tracing data anew.\n\
"),
	   &maint_btrace_cmdlist);

}