/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[btrace] " msg "\n", ##args);              \
    }                                                                   \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (bfun, mfun, fun);

            return ftrace_new_return (bfun, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call.  */
            if (start == 0 || start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Add INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
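      /* If reading or classifying the instruction failed, keep the
         default classification of BTRACE_INSN_OTHER.  */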
    }
  END_CATCH

  return iclass;
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

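  /* The block vector starts with the most recent block; iterate backwards
     so we process the trace in chronological order.  */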
  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace - unless we're at the
                 beginning.  */
              if (begin != NULL)
                {
                  warning (_("Recorded trace may be corrupted around %s."),
                           core_addr_to_string_nz (pc));

                  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
                  ngaps += 1;
                }
              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = min (level, end->level);

          size = 0;
          TRY
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          CATCH (error, RETURN_MASK_ERROR)
            {
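              /* If the disassembler failed, SIZE stays zero; this is
                 turned into a trace gap below.  */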
            }
          END_CATCH

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (end, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));

              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
              ngaps += 1;

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

#if defined (HAVE_LIBIPT)

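/* Return the btrace instruction class for the libipt instruction class
   ICLASS.  */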
static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static enum btrace_insn_flag
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  enum btrace_insn_flag flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
               struct btrace_function **pbegin,
               struct btrace_function **pend, int *plevel,
               unsigned int *ngaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode, nerrors;

  begin = *pbegin;
  end = *pend;
  nerrors = 0;
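  /* The outer loop re-synchronizes onto the trace stream after decode
     errors; the inner loop decodes one instruction at a time.  */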
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        {
          if (errcode != -pte_eos)
            warning (_("Failed to synchronize onto the Intel(R) Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
          break;
        }

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof (insn));
          if (errcode < 0)
            break;

          /* Look for gaps in the trace - unless we're at the beginning.  */
          if (begin != NULL)
            {
              /* Tracing is disabled and re-enabled each time we enter the
                 kernel.  Most times, we continue from the same instruction we
                 stopped before.  This is indicated via the RESUMED instruction
                 flag.  The ENABLED instruction flag means that we continued
                 from some other instruction.  Indicate this as a trace gap.  */
              if (insn.enabled)
                *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

              /* Indicate trace overflows.  */
              if (insn.resynced)
                *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
            }

          upd = ftrace_update_function (end, insn.ip);
          if (upd != end)
            {
              *pend = end = upd;

              if (begin == NULL)
                *pbegin = begin = upd;
            }

          /* Maintain the function level offset.  */
          *plevel = min (*plevel, end->level);

          btinsn.pc = (CORE_ADDR) insn.ip;
          btinsn.size = (gdb_byte) insn.size;
          btinsn.iclass = pt_reclassify_insn (insn.iclass);
          btinsn.flags = pt_btrace_insn_flags (&insn);

          ftrace_update_insns (end, &btinsn);
        }

      if (errcode == -pte_eos)
        break;

      /* If the gap is at the very beginning, we ignore it - we will have
         less trace, but we won't have any holes in the trace.  */
      if (begin == NULL)
        continue;

      pt_insn_get_offset (decoder, &offset);

      warning (_("Failed to decode Intel(R) Processor Trace near trace "
                 "offset 0x%" PRIx64 " at recorded PC 0x%" PRIx64 ": %s."),
               offset, insn.ip, pt_errstr (pt_errcode (errcode)));

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      *ngaps += 1;
      /* Remember the error for the summary warning below.  */
      nerrors += 1;
    }

  if (nerrors > 0)
    warning (_("The recorded execution trace may have gaps."));
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int errcode;

  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        return -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      return -pte_nomap;
    }
  END_CATCH

  return size;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                           struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel(R) Processor Trace.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

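  /* Let libipt apply decoder workarounds for known errata of the processor
     that recorded the trace.  */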
  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel(R) Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel(R) Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
                                       NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel(R) Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
                     &btinfo->ngaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
        {
          btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
          btinfo->ngaps++;
        }

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */

/* Compute the function branch trace from a block branch trace BTRACE for
   the thread TP.  */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

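  /* Use a degenerate one-instruction block [PC; PC] so the normal BTS
     trace computation adds a single entry for the current PC.  */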
  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

#if !defined (HAVE_LIBIPT)
  if (conf->format == BTRACE_FORMAT_PT)
    error (_("GDB does not support Intel(R) Processor Trace."));
#endif /* !defined (HAVE_LIBIPT) */

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

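  /* Function segments may reference OBJFILE's symbols; drop all traces
     rather than risk dangling symbol pointers.  */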
  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version
    = (const char *) xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = (struct btrace_data *) user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = (ULONGEST *) xml_find_attribute (attributes, "begin")->value;
  end = (ULONGEST *) xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
               gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
        gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}

/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
                                const struct gdb_xml_element *element,
                                void *user_data,
                                VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = (const char *) xml_find_attribute (attributes, "vendor")->value;
  family = (ULONGEST *) xml_find_attribute (attributes, "family")->value;
  model = (ULONGEST *) xml_find_attribute (attributes, "model")->value;
  stepping = (ULONGEST *) xml_find_attribute (attributes, "stepping")->value;

  btrace = (struct btrace_data *) user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
                 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
                     const struct gdb_xml_element *element,
                     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = (struct btrace_data *) user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

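/* Element and attribute descriptions for parsing a btrace XML document;
   cf. btrace.dtd.  */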
static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = (struct btrace_config *) user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or contain the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We count
             it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things get more complicated
     when gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */
1940
1941 if (lnum == 0 && rnum == 0)
1942 {
1943 lnum = lhs->function->insn_offset;
1944 rnum = rhs->function->insn_offset;
1945 }
1946 else if (lnum == 0)
1947 {
1948 lnum = lhs->function->insn_offset;
1949
1950 if (lnum == rnum)
1951 lnum -= 1;
1952 }
1953 else if (rnum == 0)
1954 {
1955 rnum = rhs->function->insn_offset;
1956
1957 if (rnum == lnum)
1958 rnum -= 1;
1959 }
1960
1961 return (int) (lnum - rnum);
1962 }
1963
1964 /* See btrace.h. */
1965
1966 int
1967 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1968 const struct btrace_thread_info *btinfo,
1969 unsigned int number)
1970 {
1971 const struct btrace_function *bfun;
1972 unsigned int end, length;
1973
1974 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1975 {
1976 /* Skip gaps. */
1977 if (bfun->errcode != 0)
1978 continue;
1979
1980 if (bfun->insn_offset <= number)
1981 break;
1982 }
1983
1984 if (bfun == NULL)
1985 return 0;
1986
1987 length = VEC_length (btrace_insn_s, bfun->insn);
1988 gdb_assert (length > 0);
1989
1990 end = bfun->insn_offset + length;
1991 if (end <= number)
1992 return 0;
1993
1994 it->function = bfun;
1995 it->index = number - bfun->insn_offset;
1996
1997 return 1;
1998 }
1999
2000 /* See btrace.h. */
2001
2002 const struct btrace_function *
2003 btrace_call_get (const struct btrace_call_iterator *it)
2004 {
2005 return it->function;
2006 }
2007
2008 /* See btrace.h. */
2009
2010 unsigned int
2011 btrace_call_number (const struct btrace_call_iterator *it)
2012 {
2013 const struct btrace_thread_info *btinfo;
2014 const struct btrace_function *bfun;
2015 unsigned int insns;
2016
2017 btinfo = it->btinfo;
2018 bfun = it->function;
2019 if (bfun != NULL)
2020 return bfun->number;
2021
2022 /* For the end iterator, i.e. bfun == NULL, we derive the number from
2023 the last function. */
2024 bfun = btinfo->end;
2025 insns = VEC_length (btrace_insn_s, bfun->insn);
2026
2027 /* If the last function contains only a single instruction (i.e. the
2028 current instruction), it is skipped by the call iteration, so its
2029 number is already the number we seek. */
2030 if (insns == 1)
2031 return bfun->number;
2032
2033 /* Otherwise, return one more than the number of the last function. */
2034 return bfun->number + 1;
2035 }
2036
2037 /* See btrace.h. */
2038
2039 void
2040 btrace_call_begin (struct btrace_call_iterator *it,
2041 const struct btrace_thread_info *btinfo)
2042 {
2043 const struct btrace_function *bfun;
2044
2045 bfun = btinfo->begin;
2046 if (bfun == NULL)
2047 error (_("No trace."));
2048
2049 it->btinfo = btinfo;
2050 it->function = bfun;
2051 }
2052
2053 /* See btrace.h. */
2054
2055 void
2056 btrace_call_end (struct btrace_call_iterator *it,
2057 const struct btrace_thread_info *btinfo)
2058 {
2059 const struct btrace_function *bfun;
2060
2061 bfun = btinfo->end;
2062 if (bfun == NULL)
2063 error (_("No trace."));
2064
2065 it->btinfo = btinfo;
2066 it->function = NULL;
2067 }
2068
2069 /* See btrace.h. */
2070
2071 unsigned int
2072 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2073 {
2074 const struct btrace_function *bfun;
2075 unsigned int steps;
2076
2077 bfun = it->function;
2078 steps = 0;
2079 while (bfun != NULL)
2080 {
2081 const struct btrace_function *next;
2082 unsigned int insns;
2083
2084 next = bfun->flow.next;
2085 if (next == NULL)
2086 {
2087 /* Ignore the last function if it only contains a single
2088 (i.e. the current) instruction. */
2089 insns = VEC_length (btrace_insn_s, bfun->insn);
2090 if (insns == 1)
2091 steps -= 1;
2092 }
2093
2094 if (stride == steps)
2095 break;
2096
2097 bfun = next;
2098 steps += 1;
2099 }
2100
2101 it->function = bfun;
2102 return steps;
2103 }
2104
2105 /* See btrace.h. */
2106
2107 unsigned int
2108 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2109 {
2110 const struct btrace_thread_info *btinfo;
2111 const struct btrace_function *bfun;
2112 unsigned int steps;
2113
2114 bfun = it->function;
2115 steps = 0;
2116
2117 if (bfun == NULL)
2118 {
2119 unsigned int insns;
2120
2121 btinfo = it->btinfo;
2122 bfun = btinfo->end;
2123 if (bfun == NULL)
2124 return 0;
2125
2126 /* Ignore the last function if it only contains a single
2127 (i.e. the current) instruction. */
2128 insns = VEC_length (btrace_insn_s, bfun->insn);
2129 if (insns == 1)
2130 bfun = bfun->flow.prev;
2131
2132 if (bfun == NULL)
2133 return 0;
2134
2135 steps += 1;
2136 }
2137
2138 while (steps < stride)
2139 {
2140 const struct btrace_function *prev;
2141
2142 prev = bfun->flow.prev;
2143 if (prev == NULL)
2144 break;
2145
2146 bfun = prev;
2147 steps += 1;
2148 }
2149
2150 it->function = bfun;
2151 return steps;
2152 }
2153
2154 /* See btrace.h. */
2155
2156 int
2157 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2158 const struct btrace_call_iterator *rhs)
2159 {
2160 unsigned int lnum, rnum;
2161
2162 lnum = btrace_call_number (lhs);
2163 rnum = btrace_call_number (rhs);
2164
2165 return (int) (lnum - rnum);
2166 }
2167
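/* Editorial sketch, not part of the original file: walking the call
   history with the iterators above, printing each function segment's
   number and name.  The function name is hypothetical;
   ftrace_print_function_name is defined earlier in this file.  */

static void
example_dump_call_history (const struct btrace_thread_info *btinfo)
{
  struct btrace_call_iterator it, end;

  btrace_call_begin (&it, btinfo);
  btrace_call_end (&end, btinfo);

  while (btrace_call_cmp (&it, &end) < 0)
    {
      const struct btrace_function *bfun;

      bfun = btrace_call_get (&it);
      printf_unfiltered ("%u\t%s\n", bfun->number,
                         ftrace_print_function_name (bfun));

      if (btrace_call_next (&it, 1) == 0)
        break;
    }
}
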
2168 /* See btrace.h. */
2169
2170 int
2171 btrace_find_call_by_number (struct btrace_call_iterator *it,
2172 const struct btrace_thread_info *btinfo,
2173 unsigned int number)
2174 {
2175 const struct btrace_function *bfun;
2176
2177 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2178 {
2179 unsigned int bnum;
2180
2181 bnum = bfun->number;
2182 if (number == bnum)
2183 {
2184 it->btinfo = btinfo;
2185 it->function = bfun;
2186 return 1;
2187 }
2188
2189 /* Functions are ordered and numbered consecutively, so we could bail
2190 out earlier.  On the other hand, it is very unlikely that we would
2191 search for a nonexistent function. */
2192 }
2193
2194 return 0;
2195 }
2196
2197 /* See btrace.h. */
2198
2199 void
2200 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2201 const struct btrace_insn_iterator *begin,
2202 const struct btrace_insn_iterator *end)
2203 {
2204 if (btinfo->insn_history == NULL)
2205 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2206
2207 btinfo->insn_history->begin = *begin;
2208 btinfo->insn_history->end = *end;
2209 }
2210
2211 /* See btrace.h. */
2212
2213 void
2214 btrace_set_call_history (struct btrace_thread_info *btinfo,
2215 const struct btrace_call_iterator *begin,
2216 const struct btrace_call_iterator *end)
2217 {
2218 gdb_assert (begin->btinfo == end->btinfo);
2219
2220 if (btinfo->call_history == NULL)
2221 btinfo->call_history = XCNEW (struct btrace_call_history);
2222
2223 btinfo->call_history->begin = *begin;
2224 btinfo->call_history->end = *end;
2225 }
2226
2227 /* See btrace.h. */
2228
2229 int
2230 btrace_is_replaying (struct thread_info *tp)
2231 {
2232 return tp->btrace.replay != NULL;
2233 }
2234
2235 /* See btrace.h. */
2236
2237 int
2238 btrace_is_empty (struct thread_info *tp)
2239 {
2240 struct btrace_insn_iterator begin, end;
2241 struct btrace_thread_info *btinfo;
2242
2243 btinfo = &tp->btrace;
2244
2245 if (btinfo->begin == NULL)
2246 return 1;
2247
2248 btrace_insn_begin (&begin, btinfo);
2249 btrace_insn_end (&end, btinfo);
2250
2251 return btrace_insn_cmp (&begin, &end) == 0;
2252 }
2253
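/* Editorial sketch, not part of the original file: the predicate above is
   typically used to guard trace-dependent commands.  The helper name is
   hypothetical.  */

static void
example_require_trace (struct thread_info *tp)
{
  if (btrace_is_empty (tp))
    error (_("No trace."));
}
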
2254 /* Forward the cleanup request. */
2255
2256 static void
2257 do_btrace_data_cleanup (void *arg)
2258 {
2259 btrace_data_fini ((struct btrace_data *) arg);
2260 }
2261
2262 /* See btrace.h. */
2263
2264 struct cleanup *
2265 make_cleanup_btrace_data (struct btrace_data *data)
2266 {
2267 return make_cleanup (do_btrace_data_cleanup, data);
2268 }
2269
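/* Editorial sketch, not part of the original file: the intended use of
   the cleanup above.  The function name is hypothetical; btrace_data_init
   and the cleanup machinery are existing GDB functions.  */

static void
example_use_btrace_data (void)
{
  struct btrace_data data;
  struct cleanup *cleanup;

  btrace_data_init (&data);
  cleanup = make_cleanup_btrace_data (&data);

  /* Work with DATA here; if anything throws, the cleanup still runs and
     frees the trace data.  */

  do_cleanups (cleanup);
}
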
2270 #if defined (HAVE_LIBIPT)
2271
2272 /* Print a single packet. */
2273
2274 static void
2275 pt_print_packet (const struct pt_packet *packet)
2276 {
2277 switch (packet->type)
2278 {
2279 default:
2280 printf_unfiltered (("[??: %x]"), packet->type);
2281 break;
2282
2283 case ppt_psb:
2284 printf_unfiltered (("psb"));
2285 break;
2286
2287 case ppt_psbend:
2288 printf_unfiltered (("psbend"));
2289 break;
2290
2291 case ppt_pad:
2292 printf_unfiltered (("pad"));
2293 break;
2294
2295 case ppt_tip:
2296 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2297 packet->payload.ip.ipc,
2298 packet->payload.ip.ip);
2299 break;
2300
2301 case ppt_tip_pge:
2302 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2303 packet->payload.ip.ipc,
2304 packet->payload.ip.ip);
2305 break;
2306
2307 case ppt_tip_pgd:
2308 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2309 packet->payload.ip.ipc,
2310 packet->payload.ip.ip);
2311 break;
2312
2313 case ppt_fup:
2314 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2315 packet->payload.ip.ipc,
2316 packet->payload.ip.ip);
2317 break;
2318
2319 case ppt_tnt_8:
2320 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2321 packet->payload.tnt.bit_size,
2322 packet->payload.tnt.payload);
2323 break;
2324
2325 case ppt_tnt_64:
2326 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2327 packet->payload.tnt.bit_size,
2328 packet->payload.tnt.payload);
2329 break;
2330
2331 case ppt_pip:
2332 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2333 packet->payload.pip.nr ? (" nr") : (""));
2334 break;
2335
2336 case ppt_tsc:
2337 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2338 break;
2339
2340 case ppt_cbr:
2341 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2342 break;
2343
2344 case ppt_mode:
2345 switch (packet->payload.mode.leaf)
2346 {
2347 default:
2348 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2349 break;
2350
2351 case pt_mol_exec:
2352 printf_unfiltered (("mode.exec%s%s"),
2353 packet->payload.mode.bits.exec.csl
2354 ? (" cs.l") : (""),
2355 packet->payload.mode.bits.exec.csd
2356 ? (" cs.d") : (""));
2357 break;
2358
2359 case pt_mol_tsx:
2360 printf_unfiltered (("mode.tsx%s%s"),
2361 packet->payload.mode.bits.tsx.intx
2362 ? (" intx") : (""),
2363 packet->payload.mode.bits.tsx.abrt
2364 ? (" abrt") : (""));
2365 break;
2366 }
2367 break;
2368
2369 case ppt_ovf:
2370 printf_unfiltered (("ovf"));
2371 break;
2372
2373 case ppt_stop:
2374 printf_unfiltered (("stop"));
2375 break;
2376
2377 case ppt_vmcs:
2378 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2379 break;
2380
2381 case ppt_tma:
2382 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2383 packet->payload.tma.fc);
2384 break;
2385
2386 case ppt_mtc:
2387 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2388 break;
2389
2390 case ppt_cyc:
2391 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2392 break;
2393
2394 case ppt_mnt:
2395 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2396 break;
2397 }
2398 }
2399
2400 /* Decode packets into MAINT using DECODER. */
2401
2402 static void
2403 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2404 struct pt_packet_decoder *decoder)
2405 {
2406 int errcode;
2407
2408 for (;;)
2409 {
2410 struct btrace_pt_packet packet;
2411
2412 errcode = pt_pkt_sync_forward (decoder);
2413 if (errcode < 0)
2414 break;
2415
2416 for (;;)
2417 {
2418 pt_pkt_get_offset (decoder, &packet.offset);
2419
2420 errcode = pt_pkt_next (decoder, &packet.packet,
2421 sizeof (packet.packet));
2422 if (errcode < 0)
2423 break;
2424
2425 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2426 {
2427 packet.errcode = pt_errcode (errcode);
2428 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2429 &packet);
2430 }
2431 }
2432
2433 if (errcode == -pte_eos)
2434 break;
2435
2436 packet.errcode = pt_errcode (errcode);
2437 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2438 &packet);
2439
2440 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2441 packet.offset, pt_errstr (packet.errcode));
2442 }
2443
2444 if (errcode != -pte_eos)
2445 warning (_("Failed to synchronize onto the Intel(R) Processor Trace "
2446 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2447 }
2448
2449 /* Update the packet history in BTINFO. */
2450
2451 static void
2452 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2453 {
2455 struct pt_packet_decoder *decoder;
2456 struct btrace_data_pt *pt;
2457 struct pt_config config;
2458 int errcode;
2459
2460 pt = &btinfo->data.variant.pt;
2461
2462 /* Nothing to do if there is no trace. */
2463 if (pt->size == 0)
2464 return;
2465
2466 memset (&config, 0, sizeof (config));
2467
2468 config.size = sizeof (config);
2469 config.begin = pt->data;
2470 config.end = pt->data + pt->size;
2471
2472 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2473 config.cpu.family = pt->config.cpu.family;
2474 config.cpu.model = pt->config.cpu.model;
2475 config.cpu.stepping = pt->config.cpu.stepping;
2476
2477 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2478 if (errcode < 0)
2479 error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
2480 pt_errstr (pt_errcode (errcode)));
2481
2482 decoder = pt_pkt_alloc_decoder (&config);
2483 if (decoder == NULL)
2484 error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
2485
2486 TRY
2487 {
2488 btrace_maint_decode_pt (&btinfo->maint, decoder);
2489 }
2490 CATCH (except, RETURN_MASK_ALL)
2491 {
2492 pt_pkt_free_decoder (decoder);
2493
2494 if (except.reason < 0)
2495 throw_exception (except);
2496 }
2497 END_CATCH
2498
2499 pt_pkt_free_decoder (decoder);
2500 }
2501
2502 #endif /* defined (HAVE_LIBIPT) */
2503
2504 /* Update the packet maintenance information for BTINFO and store the
2505 low and high bounds into BEGIN and END, respectively.
2506 Store the current iterator state into FROM and TO. */
2507
2508 static void
2509 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2510 unsigned int *begin, unsigned int *end,
2511 unsigned int *from, unsigned int *to)
2512 {
2513 switch (btinfo->data.format)
2514 {
2515 default:
2516 *begin = 0;
2517 *end = 0;
2518 *from = 0;
2519 *to = 0;
2520 break;
2521
2522 case BTRACE_FORMAT_BTS:
2523 /* Nothing to do - we operate directly on BTINFO->DATA. */
2524 *begin = 0;
2525 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2526 *from = btinfo->maint.variant.bts.packet_history.begin;
2527 *to = btinfo->maint.variant.bts.packet_history.end;
2528 break;
2529
2530 #if defined (HAVE_LIBIPT)
2531 case BTRACE_FORMAT_PT:
2532 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2533 btrace_maint_update_pt_packets (btinfo);
2534
2535 *begin = 0;
2536 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2537 *from = btinfo->maint.variant.pt.packet_history.begin;
2538 *to = btinfo->maint.variant.pt.packet_history.end;
2539 break;
2540 #endif /* defined (HAVE_LIBIPT) */
2541 }
2542 }
2543
2544 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2545 update the current iterator position. */
2546
2547 static void
2548 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2549 unsigned int begin, unsigned int end)
2550 {
2551 switch (btinfo->data.format)
2552 {
2553 default:
2554 break;
2555
2556 case BTRACE_FORMAT_BTS:
2557 {
2558 VEC (btrace_block_s) *blocks;
2559 unsigned int blk;
2560
2561 blocks = btinfo->data.variant.bts.blocks;
2562 for (blk = begin; blk < end; ++blk)
2563 {
2564 const btrace_block_s *block;
2565
2566 block = VEC_index (btrace_block_s, blocks, blk);
2567
2568 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2569 core_addr_to_string_nz (block->begin),
2570 core_addr_to_string_nz (block->end));
2571 }
2572
2573 btinfo->maint.variant.bts.packet_history.begin = begin;
2574 btinfo->maint.variant.bts.packet_history.end = end;
2575 }
2576 break;
2577
2578 #if defined (HAVE_LIBIPT)
2579 case BTRACE_FORMAT_PT:
2580 {
2581 VEC (btrace_pt_packet_s) *packets;
2582 unsigned int pkt;
2583
2584 packets = btinfo->maint.variant.pt.packets;
2585 for (pkt = begin; pkt < end; ++pkt)
2586 {
2587 const struct btrace_pt_packet *packet;
2588
2589 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2590
2591 printf_unfiltered ("%u\t", pkt);
2592 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2593
2594 if (packet->errcode == pte_ok)
2595 pt_print_packet (&packet->packet);
2596 else
2597 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2598
2599 printf_unfiltered ("\n");
2600 }
2601
2602 btinfo->maint.variant.pt.packet_history.begin = begin;
2603 btinfo->maint.variant.pt.packet_history.end = end;
2604 }
2605 break;
2606 #endif /* defined (HAVE_LIBIPT) */
2607 }
2608 }
2609
2610 /* Read a number from an argument string. */
2611
2612 static unsigned int
2613 get_uint (char **arg)
2614 {
2615 char *begin, *end, *pos;
2616 unsigned long number;
2617
2618 begin = *arg;
2619 pos = skip_spaces (begin);
2620
2621 if (!isdigit (*pos))
2622 error (_("Expected positive number, got: %s."), pos);
2623
2624 number = strtoul (pos, &end, 10);
2625 if (number > UINT_MAX)
2626 error (_("Number too big."));
2627
2628 *arg += (end - begin);
2629
2630 return (unsigned int) number;
2631 }
2632
2633 /* Read a context size from an argument string. */
2634
2635 static int
2636 get_context_size (char **arg)
2637 {
2638 char *pos;
2640
2641 pos = skip_spaces (*arg);
2642
2643 if (!isdigit (*pos))
2644 error (_("Expected positive number, got: %s."), pos);
2645
2646 return strtol (pos, arg, 10);
2647 }
2648
2649 /* Complain about junk at the end of an argument string. */
2650
2651 static void
2652 no_chunk (char *arg)
2653 {
2654 if (*arg != 0)
2655 error (_("Junk after argument: %s."), arg);
2656 }
2657
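/* Editorial sketch, not part of the original file: how the helpers above
   chain when parsing a "FROM,+SIZE" range, as the packet-history command
   below does.  The function name is hypothetical.  */

static void
example_parse_plus_range (char *arg, unsigned int *from, unsigned int *to)
{
  int size;

  *from = get_uint (&arg);

  arg = skip_spaces (arg);
  if (*arg != ',')
    error (_("Expected ','."));
  arg = skip_spaces (++arg);

  if (*arg != '+')
    error (_("Expected '+'."));
  arg += 1;

  size = get_context_size (&arg);
  no_chunk (arg);

  *to = *from + size;
}
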
2658 /* The "maintenance btrace packet-history" command. */
2659
2660 static void
2661 maint_btrace_packet_history_cmd (char *arg, int from_tty)
2662 {
2663 struct btrace_thread_info *btinfo;
2664 struct thread_info *tp;
2665 unsigned int size, begin, end, from, to;
2666
2667 tp = find_thread_ptid (inferior_ptid);
2668 if (tp == NULL)
2669 error (_("No thread."));
2670
2671 size = 10;
2672 btinfo = &tp->btrace;
2673
2674 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2675 if (begin == end)
2676 {
2677 printf_unfiltered (_("No trace.\n"));
2678 return;
2679 }
2680
2681 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2682 {
2683 from = to;
2684
2685 if (end - from < size)
2686 size = end - from;
2687 to = from + size;
2688 }
2689 else if (strcmp (arg, "-") == 0)
2690 {
2691 to = from;
2692
2693 if (to - begin < size)
2694 size = to - begin;
2695 from = to - size;
2696 }
2697 else
2698 {
2699 from = get_uint (&arg);
2700 if (end <= from)
2701 error (_("'%u' is out of range."), from);
2702
2703 arg = skip_spaces (arg);
2704 if (*arg == ',')
2705 {
2706 arg = skip_spaces (++arg);
2707
2708 if (*arg == '+')
2709 {
2710 arg += 1;
2711 size = get_context_size (&arg);
2712
2713 no_chunk (arg);
2714
2715 if (end - from < size)
2716 size = end - from;
2717 to = from + size;
2718 }
2719 else if (*arg == '-')
2720 {
2721 arg += 1;
2722 size = get_context_size (&arg);
2723
2724 no_chunk (arg);
2725
2726 /* Include the packet given as first argument. */
2727 from += 1;
2728 to = from;
2729
2730 if (to - begin < size)
2731 size = to - begin;
2732 from = to - size;
2733 }
2734 else
2735 {
2736 to = get_uint (&arg);
2737
2738 /* Include the packet at the second argument and silently
2739 truncate the range. */
2740 if (to < end)
2741 to += 1;
2742 else
2743 to = end;
2744
2745 no_chunk (arg);
2746 }
2747 }
2748 else
2749 {
2750 no_chunk (arg);
2751
2752 if (end - from < size)
2753 size = end - from;
2754 to = from + size;
2755 }
2756
2757 dont_repeat ();
2758 }
2759
2760 btrace_maint_print_packets (btinfo, from, to);
2761 }
2762
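/* Editorial note, not part of the original file: example invocations of
   the command above, derived from its argument parsing:

     maint btrace packet-history           print the next ten packets
     maint btrace packet-history -         print the previous ten packets
     maint btrace packet-history 100       print ten packets starting at 100
     maint btrace packet-history 100,120   print packets 100 through 120
     maint btrace packet-history 100,+20   print twenty packets starting at 100
     maint btrace packet-history 100,-20   print twenty packets ending at 100

   Out-of-range requests are silently truncated to the available packets.  */
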
2763 /* The "maintenance btrace clear-packet-history" command. */
2764
2765 static void
2766 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2767 {
2768 struct btrace_thread_info *btinfo;
2769 struct thread_info *tp;
2770
2771 if (args != NULL && *args != 0)
2772 error (_("Invalid argument."));
2773
2774 tp = find_thread_ptid (inferior_ptid);
2775 if (tp == NULL)
2776 error (_("No thread."));
2777
2778 btinfo = &tp->btrace;
2779
2780 /* Must clear the maint data first, as it depends on BTINFO->DATA. */
2781 btrace_maint_clear (btinfo);
2782 btrace_data_clear (&btinfo->data);
2783 }
2784
2785 /* The "maintenance btrace clear" command. */
2786
2787 static void
2788 maint_btrace_clear_cmd (char *args, int from_tty)
2789 {
2790 struct btrace_thread_info *btinfo;
2791 struct thread_info *tp;
2792
2793 if (args != NULL && *args != 0)
2794 error (_("Invalid argument."));
2795
2796 tp = find_thread_ptid (inferior_ptid);
2797 if (tp == NULL)
2798 error (_("No thread."));
2799
2800 btrace_clear (tp);
2801 }
2802
2803 /* The "maintenance btrace" command. */
2804
2805 static void
2806 maint_btrace_cmd (char *args, int from_tty)
2807 {
2808 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2809 gdb_stdout);
2810 }
2811
2812 /* The "maintenance set btrace" command. */
2813
2814 static void
2815 maint_btrace_set_cmd (char *args, int from_tty)
2816 {
2817 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2818 gdb_stdout);
2819 }
2820
2821 /* The "maintenance show btrace" command. */
2822
2823 static void
2824 maint_btrace_show_cmd (char *args, int from_tty)
2825 {
2826 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2827 all_commands, gdb_stdout);
2828 }
2829
2830 /* The "maintenance set btrace pt" command. */
2831
2832 static void
2833 maint_btrace_pt_set_cmd (char *args, int from_tty)
2834 {
2835 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2836 all_commands, gdb_stdout);
2837 }
2838
2839 /* The "maintenance show btrace pt" command. */
2840
2841 static void
2842 maint_btrace_pt_show_cmd (char *args, int from_tty)
2843 {
2844 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2845 all_commands, gdb_stdout);
2846 }
2847
2848 /* The "maintenance info btrace" command. */
2849
2850 static void
2851 maint_info_btrace_cmd (char *args, int from_tty)
2852 {
2853 struct btrace_thread_info *btinfo;
2854 struct thread_info *tp;
2855 const struct btrace_config *conf;
2856
2857 if (args != NULL && *args != 0)
2858 error (_("Invalid argument."));
2859
2860 tp = find_thread_ptid (inferior_ptid);
2861 if (tp == NULL)
2862 error (_("No thread."));
2863
2864 btinfo = &tp->btrace;
2865
2866 conf = btrace_conf (btinfo);
2867 if (conf == NULL)
2868 error (_("No btrace configuration."));
2869
2870 printf_unfiltered (_("Format: %s.\n"),
2871 btrace_format_string (conf->format));
2872
2873 switch (conf->format)
2874 {
2875 default:
2876 break;
2877
2878 case BTRACE_FORMAT_BTS:
2879 printf_unfiltered (_("Number of packets: %u.\n"),
2880 VEC_length (btrace_block_s,
2881 btinfo->data.variant.bts.blocks));
2882 break;
2883
2884 #if defined (HAVE_LIBIPT)
2885 case BTRACE_FORMAT_PT:
2886 {
2887 struct pt_version version;
2888
2889 version = pt_library_version ();
2890 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2891 version.minor, version.build,
2892 version.ext != NULL ? version.ext : "");
2893
2894 btrace_maint_update_pt_packets (btinfo);
2895 printf_unfiltered (_("Number of packets: %u.\n"),
2896 VEC_length (btrace_pt_packet_s,
2897 btinfo->maint.variant.pt.packets));
2898 }
2899 break;
2900 #endif /* defined (HAVE_LIBIPT) */
2901 }
2902 }
2903
2904 /* The "maint show btrace pt skip-pad" show value function. */
2905
2906 static void
2907 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2908 struct cmd_list_element *c,
2909 const char *value)
2910 {
2911 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2912 }
2913
2914
2915 /* Initialize btrace maintenance commands. */
2916
2917 void _initialize_btrace (void);
2918 void
2919 _initialize_btrace (void)
2920 {
2921 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2922 _("Info about branch tracing data."), &maintenanceinfolist);
2923
2924 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2925 _("Branch tracing maintenance commands."),
2926 &maint_btrace_cmdlist, "maintenance btrace ",
2927 0, &maintenancelist);
2928
2929 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2930 Set branch tracing specific variables."),
2931 &maint_btrace_set_cmdlist, "maintenance set btrace ",
2932 0, &maintenance_set_cmdlist);
2933
2934 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
2935 Set Intel(R) Processor Trace specific variables."),
2936 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2937 0, &maint_btrace_set_cmdlist);
2938
2939 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2940 Show branch tracing specific variables."),
2941 &maint_btrace_show_cmdlist, "maintenance show btrace ",
2942 0, &maintenance_show_cmdlist);
2943
2944 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
2945 Show Intel(R) Processor Trace specific variables."),
2946 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2947 0, &maint_btrace_show_cmdlist);
2948
2949 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2950 &maint_btrace_pt_skip_pad, _("\
2951 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2952 Show whether PAD packets should be skipped in the btrace packet history."), _("\
2953 When enabled, PAD packets are ignored in the btrace packet history."),
2954 NULL, show_maint_btrace_pt_skip_pad,
2955 &maint_btrace_pt_set_cmdlist,
2956 &maint_btrace_pt_show_cmdlist);
2957
2958 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2959 _("Print the raw branch tracing data.\n\
2960 With no argument, print ten more packets after the previous ten-line print.\n\
2961 With '-' as argument, print ten packets before the previous ten-line print.\n\
2962 One argument specifies the starting packet of a ten-line print.\n\
2963 Two arguments, separated by a comma, specify the starting and ending packets \
2964 to print.\n\
2965 When the second argument is preceded by '+' or '-', it specifies the \
2966 distance from the first.\n"),
2967 &maint_btrace_cmdlist);
2968
2969 add_cmd ("clear-packet-history", class_maintenance,
2970 maint_btrace_clear_packet_history_cmd,
2971 _("Clears the branch tracing packet history.\n\
2972 Discards the raw branch tracing data but not the execution history data.\n\
2973 "),
2974 &maint_btrace_cmdlist);
2975
2976 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
2977 _("Clears the branch tracing data.\n\
2978 Discards the raw branch tracing data and the execution history data.\n\
2979 The next 'record' command will fetch the branch tracing data anew.\n\
2980 "),
2981 &maint_btrace_cmdlist);
2982
2983 }