/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"
#include "rsp-low.h"
#include "gdbcmd.h"
#include "cli/cli-utils.h"

#include <inttypes.h>
#include <ctype.h>

/* Command lists for btrace maintenance commands.  */
static struct cmd_list_element *maint_btrace_cmdlist;
static struct cmd_list_element *maint_btrace_set_cmdlist;
static struct cmd_list_element *maint_btrace_show_cmdlist;
static struct cmd_list_element *maint_btrace_pt_set_cmdlist;
static struct cmd_list_element *maint_btrace_pt_show_cmdlist;

/* Control whether to skip PAD packets when computing the packet history.  */
static int maint_btrace_pt_skip_pad = 1;

static void btrace_add_pc (struct thread_info *tp);

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[btrace] " msg "\n", ##args);              \
    }                                                                   \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
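
/* For illustration (a sketch, assuming record_debug has been enabled via
   "set debug record 1"): a call such as

     DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

   expands to the do/while-wrapped fprintf_unfiltered above and writes a
   line like

     [btrace] enable thread 1 (process 1234)

   to gdb_stdlog.  DEBUG_FTRACE additionally prefixes "[ftrace] ".  */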

/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}
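
/* For example, for a hypothetical segment of "main" from main.c at level 0
   containing instructions 1 through 4, ftrace_debug (bfun, "new call")
   prints

     [btrace] [ftrace] new call: fun = main, file = main.c, level = 0, insn = [1; 5)

   where the instruction range is half-open, as the brackets indicate.  */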

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = XCNEW (struct btrace_function);

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}
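
/* A minimal sketch of the resulting chain, assuming three segments with
   two, three, and one instructions, respectively:

     segment:      #1   #2   #3
     number:        1    2    3
     insn_offset:   1    3    6

   Both counters are one-based; each segment's insn_offset is the previous
   segment's insn_offset plus the previous segment's instruction count.  */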

/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}
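
/* A worked example of the no-caller case above: if the trace starts inside
   some function at level 0 and we see two consecutive returns without
   having seen the corresponding calls, the first return segment gets level
   min (0, 0) - 1 = -1 and the next one min (0, -1) - 1 = -2.  The global
   level offset computed by our callers later normalizes these negative
   levels back to start at zero.  */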

/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack prev if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (bfun, mfun, fun);

            return ftrace_new_return (bfun, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call.  */
            if (start == 0 || start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}
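
/* For illustration of the PIC case above: 32-bit position-independent code
   commonly loads the current PC with a sequence like

       call 1f      # pushes the address of the next instruction
     1: popl %ebx   # %ebx now holds the current PC

   The call "returns" to the very next instruction, so it does not open a
   new function segment; the "last->pc + last->size == pc" check filters
   exactly this pattern.  */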

/* Add the instruction INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
    }
  END_CATCH

  return iclass;
}

/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace - unless we're at the
                 beginning.  */
              if (begin != NULL)
                {
                  warning (_("Recorded trace may be corrupted around %s."),
                           core_addr_to_string_nz (pc));

                  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
                  ngaps += 1;
                }
              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = min (level, end->level);

          size = 0;
          TRY
            {
              size = gdb_insn_length (gdbarch, pc);
            }
          CATCH (error, RETURN_MASK_ERROR)
            {
            }
          END_CATCH

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);
          insn.flags = 0;

          ftrace_update_insns (end, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));

              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
              ngaps += 1;

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}
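
/* Normalization example: if the function segments ended up at levels -2,
   -1, and 0, LEVEL is -2 and btinfo->level becomes 2, so the segments are
   presented at levels 0, 1, and 2.  */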

#if defined (HAVE_LIBIPT)

static enum btrace_insn_class
pt_reclassify_insn (enum pt_insn_class iclass)
{
  switch (iclass)
    {
    case ptic_call:
      return BTRACE_INSN_CALL;

    case ptic_return:
      return BTRACE_INSN_RETURN;

    case ptic_jump:
      return BTRACE_INSN_JUMP;

    default:
      return BTRACE_INSN_OTHER;
    }
}

/* Return the btrace instruction flags for INSN.  */

static enum btrace_insn_flag
pt_btrace_insn_flags (const struct pt_insn *insn)
{
  enum btrace_insn_flag flags = 0;

  if (insn->speculative)
    flags |= BTRACE_INSN_FLAG_SPECULATIVE;

  return flags;
}

/* Add function branch trace using DECODER.  */

static void
ftrace_add_pt (struct pt_insn_decoder *decoder,
               struct btrace_function **pbegin,
               struct btrace_function **pend, int *plevel,
               unsigned int *ngaps)
{
  struct btrace_function *begin, *end, *upd;
  uint64_t offset;
  int errcode, nerrors;

  begin = *pbegin;
  end = *pend;
  nerrors = 0;
  for (;;)
    {
      struct btrace_insn btinsn;
      struct pt_insn insn;

      errcode = pt_insn_sync_forward (decoder);
      if (errcode < 0)
        {
          if (errcode != -pte_eos)
            warning (_("Failed to synchronize onto the Intel(R) Processor "
                       "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
          break;
        }

      memset (&btinsn, 0, sizeof (btinsn));
      for (;;)
        {
          errcode = pt_insn_next (decoder, &insn, sizeof (insn));
          if (errcode < 0)
            break;

          /* Look for gaps in the trace - unless we're at the beginning.  */
          if (begin != NULL)
            {
              /* Tracing is disabled and re-enabled each time we enter the
                 kernel.  Most times, we continue from the same instruction we
                 stopped before.  This is indicated via the RESUMED instruction
                 flag.  The ENABLED instruction flag means that we continued
                 from some other instruction.  Indicate this as a trace gap.  */
              if (insn.enabled)
                *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);

              /* Indicate trace overflows.  */
              if (insn.resynced)
                *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
            }

          upd = ftrace_update_function (end, insn.ip);
          if (upd != end)
            {
              *pend = end = upd;

              if (begin == NULL)
                *pbegin = begin = upd;
            }

          /* Maintain the function level offset.  */
          *plevel = min (*plevel, end->level);

          btinsn.pc = (CORE_ADDR) insn.ip;
          btinsn.size = (gdb_byte) insn.size;
          btinsn.iclass = pt_reclassify_insn (insn.iclass);
          btinsn.flags = pt_btrace_insn_flags (&insn);

          ftrace_update_insns (end, &btinsn);
        }

      if (errcode == -pte_eos)
        break;

      /* If the gap is at the very beginning, we ignore it - we will have
         less trace, but we won't have any holes in the trace.  */
      if (begin == NULL)
        continue;

      pt_insn_get_offset (decoder, &offset);

      warning (_("Failed to decode Intel(R) Processor Trace near trace "
                 "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
               offset, insn.ip, pt_errstr (pt_errcode (errcode)));

      /* Indicate the gap in the trace.  */
      *pend = end = ftrace_new_gap (end, errcode);
      *ngaps += 1;
    }

  if (nerrors > 0)
    warning (_("The recorded execution trace may have gaps."));
}

/* A callback function to allow the trace decoder to read the inferior's
   memory.  */

static int
btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
                            const struct pt_asid *asid, uint64_t pc,
                            void *context)
{
  int errcode;

  TRY
    {
      errcode = target_read_code ((CORE_ADDR) pc, buffer, size);
      if (errcode != 0)
        return -pte_nomap;
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      return -pte_nomap;
    }
  END_CATCH

  return size;
}

/* Translate the vendor from one enum to another.  */

static enum pt_cpu_vendor
pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
{
  switch (vendor)
    {
    default:
      return pcv_unknown;

    case CV_INTEL:
      return pcv_intel;
    }
}

/* Finalize the function branch trace after decode.  */

static void
btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
                           struct thread_info *tp, int level)
{
  pt_insn_free_decoder (decoder);

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  tp->btrace.level = -level;

  /* Add a single last instruction entry for the current PC.
     This allows us to compute the backtrace at the current PC using both
     standard unwind and btrace unwind.
     This extra entry is ignored by all record commands.  */
  btrace_add_pc (tp);
}

/* Compute the function branch trace from Intel(R) Processor Trace.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  struct btrace_thread_info *btinfo;
  struct pt_insn_decoder *decoder;
  struct pt_config config;
  int level, errcode;

  if (btrace->size == 0)
    return;

  btinfo = &tp->btrace;
  level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;

  pt_config_init (&config);
  config.begin = btrace->data;
  config.end = btrace->data + btrace->size;

  config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
  config.cpu.family = btrace->config.cpu.family;
  config.cpu.model = btrace->config.cpu.model;
  config.cpu.stepping = btrace->config.cpu.stepping;

  errcode = pt_cpu_errata (&config.errata, &config.cpu);
  if (errcode < 0)
    error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
           pt_errstr (pt_errcode (errcode)));

  decoder = pt_insn_alloc_decoder (&config);
  if (decoder == NULL)
    error (_("Failed to allocate the Intel(R) Processor Trace decoder."));

  TRY
    {
      struct pt_image *image;

      image = pt_insn_get_image (decoder);
      if (image == NULL)
        error (_("Failed to configure the Intel(R) Processor Trace decoder."));

      errcode = pt_image_set_callback (image, btrace_pt_readmem_callback,
                                       NULL);
      if (errcode < 0)
        error (_("Failed to configure the Intel(R) Processor Trace decoder: "
                 "%s."), pt_errstr (pt_errcode (errcode)));

      ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
                     &btinfo->ngaps);
    }
  CATCH (error, RETURN_MASK_ALL)
    {
      /* Indicate a gap in the trace if we quit trace processing.  */
      if (error.reason == RETURN_QUIT && btinfo->end != NULL)
        {
          btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
          btinfo->ngaps++;
        }

      btrace_finalize_ftrace_pt (decoder, tp, level);

      throw_exception (error);
    }
  END_CATCH

  btrace_finalize_ftrace_pt (decoder, tp, level);
}

#else /* defined (HAVE_LIBIPT) */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
                          const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}

#endif /* defined (HAVE_LIBIPT) */
/* Compute the function branch trace from the branch trace data BTRACE
   for the thread TP.  */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
      return;

    case BTRACE_FORMAT_PT:
      btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}
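
/* The synthesized trace above consists of a single block whose begin and
   end both equal the current PC, so btrace_compute_ftrace records exactly
   one instruction.  For example, with the PC at the hypothetical address
   0x4004d6, the block is [0x4004d6; 0x4004d6].  */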

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;
  int errcode = 0;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
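
/* Stitching sketch with hypothetical addresses: suppose the old trace ends
   with an instruction at 0x4004d6 and the delta trace's chronologically
   first block is [0; 0x4004f1].  The block's unknown begin is replaced
   with 0x4004d6, and the instruction at 0x4004d6 is popped so that
   btrace_compute_ftrace re-adds it while decoding the adjusted block
   [0x4004d6; 0x4004f1].  */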

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);

    case BTRACE_FORMAT_PT:
      /* Delta reads are not supported.  */
      return -1;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* Clear the branch trace maintenance histories in BTINFO.  */

static void
btrace_maint_clear (struct btrace_thread_info *btinfo)
{
  switch (btinfo->data.format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      btinfo->maint.variant.bts.packet_history.begin = 0;
      btinfo->maint.variant.bts.packet_history.end = 0;
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      xfree (btinfo->maint.variant.pt.packets);

      btinfo->maint.variant.pt.packets = NULL;
      btinfo->maint.variant.pt.packet_history.begin = 0;
      btinfo->maint.variant.pt.packet_history.end = 0;
      break;
#endif /* defined (HAVE_LIBIPT) */
    }
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      /* Store the raw trace data.  The stored data will be cleared in
         btrace_clear, so we always append the new trace.  */
      btrace_data_append (&btinfo->data, &btrace);
      btrace_maint_clear (btinfo);

      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  /* Must clear the maint data before - it depends on BTINFO->DATA.  */
  btrace_maint_clear (btinfo);
  btrace_data_clear (&btinfo->data);
  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version = xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

/* Parse a "raw" xml record.  */

static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
               gdb_byte **pdata, size_t *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  size_t len, size;

  len = strlen (body_text);
  if (len % 2 != 0)
    gdb_xml_error (parser, _("Bad raw data size."));

  size = len / 2;

  bin = data = (gdb_byte *) xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      if (hi == 0 || lo == 0)
        gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}
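
/* Decoding example: the body text "02f4" yields the two bytes 0x02 and
   0xf4, since each byte is encoded as two hex digits with the high digit
   first (fromhex ('0') * 16 + fromhex ('2') == 0x02).  */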

/* Parse a btrace pt-config "cpu" xml record.  */

static void
parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
                                const struct gdb_xml_element *element,
                                void *user_data,
                                VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  const char *vendor;
  ULONGEST *family, *model, *stepping;

  vendor = xml_find_attribute (attributes, "vendor")->value;
  family = xml_find_attribute (attributes, "family")->value;
  model = xml_find_attribute (attributes, "model")->value;
  stepping = xml_find_attribute (attributes, "stepping")->value;

  btrace = user_data;

  if (strcmp (vendor, "GenuineIntel") == 0)
    btrace->variant.pt.config.cpu.vendor = CV_INTEL;

  btrace->variant.pt.config.cpu.family = *family;
  btrace->variant.pt.config.cpu.model = *model;
  btrace->variant.pt.config.cpu.stepping = *stepping;
}

/* Parse a btrace pt "raw" xml record.  */

static void
parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
                         const struct gdb_xml_element *element,
                         void *user_data, const char *body_text)
{
  struct btrace_data *btrace;

  btrace = user_data;
  parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
                 &btrace->variant.pt.size);
}

/* Parse a btrace "pt" xml record.  */

static void
parse_xml_btrace_pt (struct gdb_xml_parser *parser,
                     const struct gdb_xml_element *element,
                     void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;

  btrace = user_data;
  btrace->format = BTRACE_FORMAT_PT;
  btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
  btrace->variant.pt.data = NULL;
  btrace->variant.pt.size = 0;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
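
/* A minimal BTS-format document accepted by the element tables above,
   with hypothetical addresses (a PT-format document would carry a "pt"
   record with an optional "pt-config" child and a hex-encoded "raw"
   body instead of "block" records):

     <btrace version="1.0">
       <block begin="0x400521" end="0x400539"/>
       <block begin="0x4004d6" end="0x4004f1"/>
     </btrace>

   Note that blocks are listed most recent first.  */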

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

/* Parse a btrace-conf "pt" xml record.  */

static void
parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = user_data;
  conf->format = BTRACE_FORMAT_PT;
  conf->pt.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new
             function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We
             count it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
        lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
        rnum -= 1;
    }

  return (int) (lnum - rnum);
}
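
/* Comparison example with a gap: let LHS point to a gap whose function has
   insn_offset 10 and RHS to the instruction numbered 10.  LNUM is first
   taken as 10 and then adjusted to 9, so the gap correctly orders before
   the instruction that follows it.  */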
1957
1958 /* See btrace.h. */
1959
1960 int
1961 btrace_find_insn_by_number (struct btrace_insn_iterator *it,
1962 const struct btrace_thread_info *btinfo,
1963 unsigned int number)
1964 {
1965 const struct btrace_function *bfun;
1966 unsigned int end, length;
1967
1968 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
1969 {
1970 /* Skip gaps. */
1971 if (bfun->errcode != 0)
1972 continue;
1973
1974 if (bfun->insn_offset <= number)
1975 break;
1976 }
1977
1978 if (bfun == NULL)
1979 return 0;
1980
1981 length = VEC_length (btrace_insn_s, bfun->insn);
1982 gdb_assert (length > 0);
1983
1984 end = bfun->insn_offset + length;
1985 if (end <= number)
1986 return 0;
1987
1988 it->function = bfun;
1989 it->index = number - bfun->insn_offset;
1990
1991 return 1;
1992 }
1993
1994 /* See btrace.h. */
1995
1996 const struct btrace_function *
1997 btrace_call_get (const struct btrace_call_iterator *it)
1998 {
1999 return it->function;
2000 }
2001
2002 /* See btrace.h. */
2003
2004 unsigned int
2005 btrace_call_number (const struct btrace_call_iterator *it)
2006 {
2007 const struct btrace_thread_info *btinfo;
2008 const struct btrace_function *bfun;
2009 unsigned int insns;
2010
2011 btinfo = it->btinfo;
2012 bfun = it->function;
2013 if (bfun != NULL)
2014 return bfun->number;
2015
2016 /* For the end iterator, i.e. bfun == NULL, we return one more than the
2017 number of the last function. */
2018 bfun = btinfo->end;
2019 insns = VEC_length (btrace_insn_s, bfun->insn);
2020
2021 /* If the last function contains only a single instruction (i.e. the
2022 current instruction), the call iterator skips it and its number is
2023 already the number we seek. */
2024 if (insns == 1)
2025 return bfun->number;
2026
2027 /* Otherwise, return one more than the number of the last function. */
2028 return bfun->number + 1;
2029 }
2030
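/* Worked example (illustrative): assume function segments numbered
   1..3 where segment 3 contains only the current instruction.  For the
   end iterator (BFUN == NULL), INSNS == 1, so segment 3 is skipped and
   the end iterator reports number 3.  Had segment 3 contained further
   instructions, the end iterator would report 3 + 1 == 4.  */
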
2031 /* See btrace.h. */
2032
2033 void
2034 btrace_call_begin (struct btrace_call_iterator *it,
2035 const struct btrace_thread_info *btinfo)
2036 {
2037 const struct btrace_function *bfun;
2038
2039 bfun = btinfo->begin;
2040 if (bfun == NULL)
2041 error (_("No trace."));
2042
2043 it->btinfo = btinfo;
2044 it->function = bfun;
2045 }
2046
2047 /* See btrace.h. */
2048
2049 void
2050 btrace_call_end (struct btrace_call_iterator *it,
2051 const struct btrace_thread_info *btinfo)
2052 {
2053 const struct btrace_function *bfun;
2054
2055 bfun = btinfo->end;
2056 if (bfun == NULL)
2057 error (_("No trace."));
2058
2059 it->btinfo = btinfo;
2060 it->function = NULL;
2061 }
2062
2063 /* See btrace.h. */
2064
2065 unsigned int
2066 btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
2067 {
2068 const struct btrace_function *bfun;
2069 unsigned int steps;
2070
2071 bfun = it->function;
2072 steps = 0;
2073 while (bfun != NULL)
2074 {
2075 const struct btrace_function *next;
2076 unsigned int insns;
2077
2078 next = bfun->flow.next;
2079 if (next == NULL)
2080 {
2081 /* Ignore the last function if it only contains a single
2082 (i.e. the current) instruction. */
2083 insns = VEC_length (btrace_insn_s, bfun->insn);
2084 if (insns == 1)
2085 steps -= 1;
2086 }
2087
2088 if (stride == steps)
2089 break;
2090
2091 bfun = next;
2092 steps += 1;
2093 }
2094
2095 it->function = bfun;
2096 return steps;
2097 }
2098
2099 /* See btrace.h. */
2100
2101 unsigned int
2102 btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
2103 {
2104 const struct btrace_thread_info *btinfo;
2105 const struct btrace_function *bfun;
2106 unsigned int steps;
2107
2108 bfun = it->function;
2109 steps = 0;
2110
2111 if (bfun == NULL)
2112 {
2113 unsigned int insns;
2114
2115 btinfo = it->btinfo;
2116 bfun = btinfo->end;
2117 if (bfun == NULL)
2118 return 0;
2119
2120 /* Ignore the last function if it only contains a single
2121 (i.e. the current) instruction. */
2122 insns = VEC_length (btrace_insn_s, bfun->insn);
2123 if (insns == 1)
2124 bfun = bfun->flow.prev;
2125
2126 if (bfun == NULL)
2127 return 0;
2128
2129 steps += 1;
2130 }
2131
2132 while (steps < stride)
2133 {
2134 const struct btrace_function *prev;
2135
2136 prev = bfun->flow.prev;
2137 if (prev == NULL)
2138 break;
2139
2140 bfun = prev;
2141 steps += 1;
2142 }
2143
2144 it->function = bfun;
2145 return steps;
2146 }
2147
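/* Iteration sketch (illustrative, not part of GDB): walking the call
   history from the end toward the beginning, one call at a time.
   BTINFO and the process () callback are placeholders.  Guarded by
   #if 0 so it does not affect the build.  */
#if 0
  struct btrace_call_iterator it;

  btrace_call_end (&it, btinfo);
  while (btrace_call_prev (&it, 1) != 0)
    process (btrace_call_get (&it));
#endif
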
2148 /* See btrace.h. */
2149
2150 int
2151 btrace_call_cmp (const struct btrace_call_iterator *lhs,
2152 const struct btrace_call_iterator *rhs)
2153 {
2154 unsigned int lnum, rnum;
2155
2156 lnum = btrace_call_number (lhs);
2157 rnum = btrace_call_number (rhs);
2158
2159 return (int) (lnum - rnum);
2160 }
2161
2162 /* See btrace.h. */
2163
2164 int
2165 btrace_find_call_by_number (struct btrace_call_iterator *it,
2166 const struct btrace_thread_info *btinfo,
2167 unsigned int number)
2168 {
2169 const struct btrace_function *bfun;
2170
2171 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2172 {
2173 unsigned int bnum;
2174
2175 bnum = bfun->number;
2176 if (number == bnum)
2177 {
2178 it->btinfo = btinfo;
2179 it->function = bfun;
2180 return 1;
2181 }
2182
2183 /* Functions are ordered and numbered consecutively, so we could bail
2184 out as soon as BNUM drops below NUMBER. On the other hand, it is
2185 very unlikely that we search for a nonexistent function. */
2186 }
2187
2188 return 0;
2189 }
2190
2191 /* See btrace.h. */
2192
2193 void
2194 btrace_set_insn_history (struct btrace_thread_info *btinfo,
2195 const struct btrace_insn_iterator *begin,
2196 const struct btrace_insn_iterator *end)
2197 {
2198 if (btinfo->insn_history == NULL)
2199 btinfo->insn_history = XCNEW (struct btrace_insn_history);
2200
2201 btinfo->insn_history->begin = *begin;
2202 btinfo->insn_history->end = *end;
2203 }
2204
2205 /* See btrace.h. */
2206
2207 void
2208 btrace_set_call_history (struct btrace_thread_info *btinfo,
2209 const struct btrace_call_iterator *begin,
2210 const struct btrace_call_iterator *end)
2211 {
2212 gdb_assert (begin->btinfo == end->btinfo);
2213
2214 if (btinfo->call_history == NULL)
2215 btinfo->call_history = XCNEW (struct btrace_call_history);
2216
2217 btinfo->call_history->begin = *begin;
2218 btinfo->call_history->end = *end;
2219 }
2220
2221 /* See btrace.h. */
2222
2223 int
2224 btrace_is_replaying (struct thread_info *tp)
2225 {
2226 return tp->btrace.replay != NULL;
2227 }
2228
2229 /* See btrace.h. */
2230
2231 int
2232 btrace_is_empty (struct thread_info *tp)
2233 {
2234 struct btrace_insn_iterator begin, end;
2235 struct btrace_thread_info *btinfo;
2236
2237 btinfo = &tp->btrace;
2238
2239 if (btinfo->begin == NULL)
2240 return 1;
2241
2242 btrace_insn_begin (&begin, btinfo);
2243 btrace_insn_end (&end, btinfo);
2244
2245 return btrace_insn_cmp (&begin, &end) == 0;
2246 }
2247
2248 /* Forward the cleanup request. */
2249
2250 static void
2251 do_btrace_data_cleanup (void *arg)
2252 {
2253 btrace_data_fini (arg);
2254 }
2255
2256 /* See btrace.h. */
2257
2258 struct cleanup *
2259 make_cleanup_btrace_data (struct btrace_data *data)
2260 {
2261 return make_cleanup (do_btrace_data_cleanup, data);
2262 }
2263
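/* Usage sketch (illustrative): the cleanup ties the data's lifetime to
   an error path, running btrace_data_fini via do_cleanups even if an
   exception unwinds past the using code.

       struct btrace_data btrace;
       struct cleanup *cleanup;

       btrace_data_init (&btrace);
       cleanup = make_cleanup_btrace_data (&btrace);
       ... code that may throw ...
       do_cleanups (cleanup);

   btrace_data_init and do_cleanups are existing GDB functions; the
   throwing code is a placeholder.  */
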
2264 #if defined (HAVE_LIBIPT)
2265
2266 /* Print a single packet. */
2267
2268 static void
2269 pt_print_packet (const struct pt_packet *packet)
2270 {
2271 switch (packet->type)
2272 {
2273 default:
2274 printf_unfiltered (("[??: %x]"), packet->type);
2275 break;
2276
2277 case ppt_psb:
2278 printf_unfiltered (("psb"));
2279 break;
2280
2281 case ppt_psbend:
2282 printf_unfiltered (("psbend"));
2283 break;
2284
2285 case ppt_pad:
2286 printf_unfiltered (("pad"));
2287 break;
2288
2289 case ppt_tip:
2290 printf_unfiltered (("tip %u: 0x%" PRIx64 ""),
2291 packet->payload.ip.ipc,
2292 packet->payload.ip.ip);
2293 break;
2294
2295 case ppt_tip_pge:
2296 printf_unfiltered (("tip.pge %u: 0x%" PRIx64 ""),
2297 packet->payload.ip.ipc,
2298 packet->payload.ip.ip);
2299 break;
2300
2301 case ppt_tip_pgd:
2302 printf_unfiltered (("tip.pgd %u: 0x%" PRIx64 ""),
2303 packet->payload.ip.ipc,
2304 packet->payload.ip.ip);
2305 break;
2306
2307 case ppt_fup:
2308 printf_unfiltered (("fup %u: 0x%" PRIx64 ""),
2309 packet->payload.ip.ipc,
2310 packet->payload.ip.ip);
2311 break;
2312
2313 case ppt_tnt_8:
2314 printf_unfiltered (("tnt-8 %u: 0x%" PRIx64 ""),
2315 packet->payload.tnt.bit_size,
2316 packet->payload.tnt.payload);
2317 break;
2318
2319 case ppt_tnt_64:
2320 printf_unfiltered (("tnt-64 %u: 0x%" PRIx64 ""),
2321 packet->payload.tnt.bit_size,
2322 packet->payload.tnt.payload);
2323 break;
2324
2325 case ppt_pip:
2326 printf_unfiltered (("pip %" PRIx64 "%s"), packet->payload.pip.cr3,
2327 packet->payload.pip.nr ? (" nr") : (""));
2328 break;
2329
2330 case ppt_tsc:
2331 printf_unfiltered (("tsc %" PRIx64 ""), packet->payload.tsc.tsc);
2332 break;
2333
2334 case ppt_cbr:
2335 printf_unfiltered (("cbr %u"), packet->payload.cbr.ratio);
2336 break;
2337
2338 case ppt_mode:
2339 switch (packet->payload.mode.leaf)
2340 {
2341 default:
2342 printf_unfiltered (("mode %u"), packet->payload.mode.leaf);
2343 break;
2344
2345 case pt_mol_exec:
2346 printf_unfiltered (("mode.exec%s%s"),
2347 packet->payload.mode.bits.exec.csl
2348 ? (" cs.l") : (""),
2349 packet->payload.mode.bits.exec.csd
2350 ? (" cs.d") : (""));
2351 break;
2352
2353 case pt_mol_tsx:
2354 printf_unfiltered (("mode.tsx%s%s"),
2355 packet->payload.mode.bits.tsx.intx
2356 ? (" intx") : (""),
2357 packet->payload.mode.bits.tsx.abrt
2358 ? (" abrt") : (""));
2359 break;
2360 }
2361 break;
2362
2363 case ppt_ovf:
2364 printf_unfiltered (("ovf"));
2365 break;
2366
2367 case ppt_stop:
2368 printf_unfiltered (("stop"));
2369 break;
2370
2371 case ppt_vmcs:
2372 printf_unfiltered (("vmcs %" PRIx64 ""), packet->payload.vmcs.base);
2373 break;
2374
2375 case ppt_tma:
2376 printf_unfiltered (("tma %x %x"), packet->payload.tma.ctc,
2377 packet->payload.tma.fc);
2378 break;
2379
2380 case ppt_mtc:
2381 printf_unfiltered (("mtc %x"), packet->payload.mtc.ctc);
2382 break;
2383
2384 case ppt_cyc:
2385 printf_unfiltered (("cyc %" PRIx64 ""), packet->payload.cyc.value);
2386 break;
2387
2388 case ppt_mnt:
2389 printf_unfiltered (("mnt %" PRIx64 ""), packet->payload.mnt.payload);
2390 break;
2391 }
2392 }
2393
2394 /* Decode packets into MAINT using DECODER. */
2395
2396 static void
2397 btrace_maint_decode_pt (struct btrace_maint_info *maint,
2398 struct pt_packet_decoder *decoder)
2399 {
2400 int errcode;
2401
2402 for (;;)
2403 {
2404 struct btrace_pt_packet packet;
2405
2406 errcode = pt_pkt_sync_forward (decoder);
2407 if (errcode < 0)
2408 break;
2409
2410 for (;;)
2411 {
2412 pt_pkt_get_offset (decoder, &packet.offset);
2413
2414 errcode = pt_pkt_next (decoder, &packet.packet,
2415 sizeof (packet.packet));
2416 if (errcode < 0)
2417 break;
2418
2419 if (maint_btrace_pt_skip_pad == 0 || packet.packet.type != ppt_pad)
2420 {
2421 packet.errcode = pt_errcode (errcode);
2422 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2423 &packet);
2424 }
2425 }
2426
2427 if (errcode == -pte_eos)
2428 break;
2429
2430 packet.errcode = pt_errcode (errcode);
2431 VEC_safe_push (btrace_pt_packet_s, maint->variant.pt.packets,
2432 &packet);
2433
2434 warning (_("Error at trace offset 0x%" PRIx64 ": %s."),
2435 packet.offset, pt_errstr (packet.errcode));
2436 }
2437
2438 if (errcode != -pte_eos)
2439 warning (_("Failed to synchronize onto the Intel(R) Processor Trace "
2440 "stream: %s."), pt_errstr (pt_errcode (errcode)));
2441 }
2442
2443 /* Update the packet history in BTINFO. */
2444
2445 static void
2446 btrace_maint_update_pt_packets (struct btrace_thread_info *btinfo)
2447 {
2448 volatile struct gdb_exception except;
2449 struct pt_packet_decoder *decoder;
2450 struct btrace_data_pt *pt;
2451 struct pt_config config;
2452 int errcode;
2453
2454 pt = &btinfo->data.variant.pt;
2455
2456 /* Nothing to do if there is no trace. */
2457 if (pt->size == 0)
2458 return;
2459
2460 memset (&config, 0, sizeof (config));
2461
2462 config.size = sizeof (config);
2463 config.begin = pt->data;
2464 config.end = pt->data + pt->size;
2465
2466 config.cpu.vendor = pt_translate_cpu_vendor (pt->config.cpu.vendor);
2467 config.cpu.family = pt->config.cpu.family;
2468 config.cpu.model = pt->config.cpu.model;
2469 config.cpu.stepping = pt->config.cpu.stepping;
2470
2471 errcode = pt_cpu_errata (&config.errata, &config.cpu);
2472 if (errcode < 0)
2473 error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
2474 pt_errstr (pt_errcode (errcode)));
2475
2476 decoder = pt_pkt_alloc_decoder (&config);
2477 if (decoder == NULL)
2478 error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
2479
2480 TRY
2481 {
2482 btrace_maint_decode_pt (&btinfo->maint, decoder);
2483 }
2484 CATCH (except, RETURN_MASK_ALL)
2485 {
2486 pt_pkt_free_decoder (decoder);
2487
2488 if (except.reason < 0)
2489 throw_exception (except);
2490 }
2491 END_CATCH
2492
2493 pt_pkt_free_decoder (decoder);
2494 }
2495
2496 #endif /* defined (HAVE_LIBIPT) */
2497
2498 /* Update the packet maintenance information for BTINFO and store the
2499 low and high bounds into BEGIN and END, respectively.
2500 Store the current iterator state into FROM and TO. */
2501
2502 static void
2503 btrace_maint_update_packets (struct btrace_thread_info *btinfo,
2504 unsigned int *begin, unsigned int *end,
2505 unsigned int *from, unsigned int *to)
2506 {
2507 switch (btinfo->data.format)
2508 {
2509 default:
2510 *begin = 0;
2511 *end = 0;
2512 *from = 0;
2513 *to = 0;
2514 break;
2515
2516 case BTRACE_FORMAT_BTS:
2517 /* Nothing to do - we operate directly on BTINFO->DATA. */
2518 *begin = 0;
2519 *end = VEC_length (btrace_block_s, btinfo->data.variant.bts.blocks);
2520 *from = btinfo->maint.variant.bts.packet_history.begin;
2521 *to = btinfo->maint.variant.bts.packet_history.end;
2522 break;
2523
2524 #if defined (HAVE_LIBIPT)
2525 case BTRACE_FORMAT_PT:
2526 if (VEC_empty (btrace_pt_packet_s, btinfo->maint.variant.pt.packets))
2527 btrace_maint_update_pt_packets (btinfo);
2528
2529 *begin = 0;
2530 *end = VEC_length (btrace_pt_packet_s, btinfo->maint.variant.pt.packets);
2531 *from = btinfo->maint.variant.pt.packet_history.begin;
2532 *to = btinfo->maint.variant.pt.packet_history.end;
2533 break;
2534 #endif /* defined (HAVE_LIBIPT) */
2535 }
2536 }
2537
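/* Example (illustrative): for a BTS trace with 100 recorded blocks and
   a previous "maint btrace packet-history" print covering packets
   [20, 30), this stores *BEGIN == 0, *END == 100, *FROM == 20, and
   *TO == 30.  */
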
2538 /* Print packets in BTINFO from BEGIN (inclusive) until END (exclusive) and
2539 update the current iterator position. */
2540
2541 static void
2542 btrace_maint_print_packets (struct btrace_thread_info *btinfo,
2543 unsigned int begin, unsigned int end)
2544 {
2545 switch (btinfo->data.format)
2546 {
2547 default:
2548 break;
2549
2550 case BTRACE_FORMAT_BTS:
2551 {
2552 VEC (btrace_block_s) *blocks;
2553 unsigned int blk;
2554
2555 blocks = btinfo->data.variant.bts.blocks;
2556 for (blk = begin; blk < end; ++blk)
2557 {
2558 const btrace_block_s *block;
2559
2560 block = VEC_index (btrace_block_s, blocks, blk);
2561
2562 printf_unfiltered ("%u\tbegin: %s, end: %s\n", blk,
2563 core_addr_to_string_nz (block->begin),
2564 core_addr_to_string_nz (block->end));
2565 }
2566
2567 btinfo->maint.variant.bts.packet_history.begin = begin;
2568 btinfo->maint.variant.bts.packet_history.end = end;
2569 }
2570 break;
2571
2572 #if defined (HAVE_LIBIPT)
2573 case BTRACE_FORMAT_PT:
2574 {
2575 VEC (btrace_pt_packet_s) *packets;
2576 unsigned int pkt;
2577
2578 packets = btinfo->maint.variant.pt.packets;
2579 for (pkt = begin; pkt < end; ++pkt)
2580 {
2581 const struct btrace_pt_packet *packet;
2582
2583 packet = VEC_index (btrace_pt_packet_s, packets, pkt);
2584
2585 printf_unfiltered ("%u\t", pkt);
2586 printf_unfiltered ("0x%" PRIx64 "\t", packet->offset);
2587
2588 if (packet->errcode == pte_ok)
2589 pt_print_packet (&packet->packet);
2590 else
2591 printf_unfiltered ("[error: %s]", pt_errstr (packet->errcode));
2592
2593 printf_unfiltered ("\n");
2594 }
2595
2596 btinfo->maint.variant.pt.packet_history.begin = begin;
2597 btinfo->maint.variant.pt.packet_history.end = end;
2598 }
2599 break;
2600 #endif /* defined (HAVE_LIBIPT) */
2601 }
2602 }
2603
2604 /* Read a number from an argument string. */
2605
2606 static unsigned int
2607 get_uint (char **arg)
2608 {
2609 char *begin, *end, *pos;
2610 unsigned long number;
2611
2612 begin = *arg;
2613 pos = skip_spaces (begin);
2614
2615 if (!isdigit ((unsigned char) *pos))
2616 error (_("Expected positive number, got: %s."), pos);
2617
2618 number = strtoul (pos, &end, 10);
2619 if (number > UINT_MAX)
2620 error (_("Number too big."));
2621
2622 *arg += (end - begin);
2623
2624 return (unsigned int) number;
2625 }
2626
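/* Example (illustrative): for *ARG == "12,+3", get_uint returns 12 and
   advances *ARG to point at ",+3"; callers such as
   maint_btrace_packet_history_cmd below then dispatch on the ','.  */
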
2627 /* Read a context size from an argument string. */
2628
2629 static int
2630 get_context_size (char **arg)
2631 {
2632 char *pos;
2634
2635 pos = skip_spaces (*arg);
2636
2637 if (!isdigit ((unsigned char) *pos))
2638 error (_("Expected positive number, got: %s."), pos);
2639
2640 return strtol (pos, arg, 10);
2641 }
2642
2643 /* Complain about junk at the end of an argument string. */
2644
2645 static void
2646 no_chunk (char *arg)
2647 {
2648 if (*arg != 0)
2649 error (_("Junk after argument: %s."), arg);
2650 }
2651
2652 /* The "maintenance btrace packet-history" command. */
2653
2654 static void
2655 maint_btrace_packet_history_cmd (char *arg, int from_tty)
2656 {
2657 struct btrace_thread_info *btinfo;
2658 struct thread_info *tp;
2659 unsigned int size, begin, end, from, to;
2660
2661 tp = find_thread_ptid (inferior_ptid);
2662 if (tp == NULL)
2663 error (_("No thread."));
2664
2665 size = 10;
2666 btinfo = &tp->btrace;
2667
2668 btrace_maint_update_packets (btinfo, &begin, &end, &from, &to);
2669 if (begin == end)
2670 {
2671 printf_unfiltered (_("No trace.\n"));
2672 return;
2673 }
2674
2675 if (arg == NULL || *arg == 0 || strcmp (arg, "+") == 0)
2676 {
2677 from = to;
2678
2679 if (end - from < size)
2680 size = end - from;
2681 to = from + size;
2682 }
2683 else if (strcmp (arg, "-") == 0)
2684 {
2685 to = from;
2686
2687 if (to - begin < size)
2688 size = to - begin;
2689 from = to - size;
2690 }
2691 else
2692 {
2693 from = get_uint (&arg);
2694 if (end <= from)
2695 error (_("'%u' is out of range."), from);
2696
2697 arg = skip_spaces (arg);
2698 if (*arg == ',')
2699 {
2700 arg = skip_spaces (++arg);
2701
2702 if (*arg == '+')
2703 {
2704 arg += 1;
2705 size = get_context_size (&arg);
2706
2707 no_chunk (arg);
2708
2709 if (end - from < size)
2710 size = end - from;
2711 to = from + size;
2712 }
2713 else if (*arg == '-')
2714 {
2715 arg += 1;
2716 size = get_context_size (&arg);
2717
2718 no_chunk (arg);
2719
2720 /* Include the packet given as the first argument. */
2721 from += 1;
2722 to = from;
2723
2724 if (to - begin < size)
2725 size = to - begin;
2726 from = to - size;
2727 }
2728 else
2729 {
2730 to = get_uint (&arg);
2731
2732 /* Include the packet given as the second argument and silently
2733 truncate the range at the end of the trace. */
2734 if (to < end)
2735 to += 1;
2736 else
2737 to = end;
2738
2739 no_chunk (arg);
2740 }
2741 }
2742 else
2743 {
2744 no_chunk (arg);
2745
2746 if (end - from < size)
2747 size = end - from;
2748 to = from + size;
2749 }
2750
2751 dont_repeat ();
2752 }
2753
2754 btrace_maint_print_packets (btinfo, from, to);
2755 }
2756
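/* Invocation examples (illustrative), following the argument grammar
   handled above and assuming the requested packets are in range:

       maint btrace packet-history        ten packets after the last print
       maint btrace packet-history -      ten packets before the last print
       maint btrace packet-history 37     packets 37..46
       maint btrace packet-history 37,47  packets 37..47
       maint btrace packet-history 37,+5  packets 37..41
       maint btrace packet-history 37,-5  packets 33..37  */
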
2757 /* The "maintenance btrace clear-packet-history" command. */
2758
2759 static void
2760 maint_btrace_clear_packet_history_cmd (char *args, int from_tty)
2761 {
2762 struct btrace_thread_info *btinfo;
2763 struct thread_info *tp;
2764
2765 if (args != NULL && *args != 0)
2766 error (_("Invalid argument."));
2767
2768 tp = find_thread_ptid (inferior_ptid);
2769 if (tp == NULL)
2770 error (_("No thread."));
2771
2772 btinfo = &tp->btrace;
2773
2774 /* We must clear the maintenance data first - it refers to BTINFO->DATA. */
2775 btrace_maint_clear (btinfo);
2776 btrace_data_clear (&btinfo->data);
2777 }
2778
2779 /* The "maintenance btrace clear" command. */
2780
2781 static void
2782 maint_btrace_clear_cmd (char *args, int from_tty)
2783 {
2784 struct btrace_thread_info *btinfo;
2785 struct thread_info *tp;
2786
2787 if (args != NULL && *args != 0)
2788 error (_("Invalid argument."));
2789
2790 tp = find_thread_ptid (inferior_ptid);
2791 if (tp == NULL)
2792 error (_("No thread."));
2793
2794 btrace_clear (tp);
2795 }
2796
2797 /* The "maintenance btrace" command. */
2798
2799 static void
2800 maint_btrace_cmd (char *args, int from_tty)
2801 {
2802 help_list (maint_btrace_cmdlist, "maintenance btrace ", all_commands,
2803 gdb_stdout);
2804 }
2805
2806 /* The "maintenance set btrace" command. */
2807
2808 static void
2809 maint_btrace_set_cmd (char *args, int from_tty)
2810 {
2811 help_list (maint_btrace_set_cmdlist, "maintenance set btrace ", all_commands,
2812 gdb_stdout);
2813 }
2814
2815 /* The "maintenance show btrace" command. */
2816
2817 static void
2818 maint_btrace_show_cmd (char *args, int from_tty)
2819 {
2820 help_list (maint_btrace_show_cmdlist, "maintenance show btrace ",
2821 all_commands, gdb_stdout);
2822 }
2823
2824 /* The "maintenance set btrace pt" command. */
2825
2826 static void
2827 maint_btrace_pt_set_cmd (char *args, int from_tty)
2828 {
2829 help_list (maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2830 all_commands, gdb_stdout);
2831 }
2832
2833 /* The "maintenance show btrace pt" command. */
2834
2835 static void
2836 maint_btrace_pt_show_cmd (char *args, int from_tty)
2837 {
2838 help_list (maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2839 all_commands, gdb_stdout);
2840 }
2841
2842 /* The "maintenance info btrace" command. */
2843
2844 static void
2845 maint_info_btrace_cmd (char *args, int from_tty)
2846 {
2847 struct btrace_thread_info *btinfo;
2848 struct thread_info *tp;
2849 const struct btrace_config *conf;
2850
2851 if (args != NULL && *args != 0)
2852 error (_("Invalid argument."));
2853
2854 tp = find_thread_ptid (inferior_ptid);
2855 if (tp == NULL)
2856 error (_("No thread."));
2857
2858 btinfo = &tp->btrace;
2859
2860 conf = btrace_conf (btinfo);
2861 if (conf == NULL)
2862 error (_("No btrace configuration."));
2863
2864 printf_unfiltered (_("Format: %s.\n"),
2865 btrace_format_string (conf->format));
2866
2867 switch (conf->format)
2868 {
2869 default:
2870 break;
2871
2872 case BTRACE_FORMAT_BTS:
2873 printf_unfiltered (_("Number of packets: %u.\n"),
2874 VEC_length (btrace_block_s,
2875 btinfo->data.variant.bts.blocks));
2876 break;
2877
2878 #if defined (HAVE_LIBIPT)
2879 case BTRACE_FORMAT_PT:
2880 {
2881 struct pt_version version;
2882
2883 version = pt_library_version ();
2884 printf_unfiltered (_("Version: %u.%u.%u%s.\n"), version.major,
2885 version.minor, version.build,
2886 version.ext != NULL ? version.ext : "");
2887
2888 btrace_maint_update_pt_packets (btinfo);
2889 printf_unfiltered (_("Number of packets: %u.\n"),
2890 VEC_length (btrace_pt_packet_s,
2891 btinfo->maint.variant.pt.packets));
2892 }
2893 break;
2894 #endif /* defined (HAVE_LIBIPT) */
2895 }
2896 }
2897
2898 /* The "maint show btrace pt skip-pad" show value function. */
2899
2900 static void
2901 show_maint_btrace_pt_skip_pad (struct ui_file *file, int from_tty,
2902 struct cmd_list_element *c,
2903 const char *value)
2904 {
2905 fprintf_filtered (file, _("Skip PAD packets is %s.\n"), value);
2906 }
2907
2908
2909 /* Initialize btrace maintenance commands. */
2910
2911 void _initialize_btrace (void);
2912 void
2913 _initialize_btrace (void)
2914 {
2915 add_cmd ("btrace", class_maintenance, maint_info_btrace_cmd,
2916 _("Info about branch tracing data."), &maintenanceinfolist);
2917
2918 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_cmd,
2919 _("Branch tracing maintenance commands."),
2920 &maint_btrace_cmdlist, "maintenance btrace ",
2921 0, &maintenancelist);
2922
2923 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_set_cmd, _("\
2924 Set branch tracing specific variables."),
2925 &maint_btrace_set_cmdlist, "maintenance set btrace ",
2926 0, &maintenance_set_cmdlist);
2927
2928 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_set_cmd, _("\
2929 Set Intel(R) Processor Trace specific variables."),
2930 &maint_btrace_pt_set_cmdlist, "maintenance set btrace pt ",
2931 0, &maint_btrace_set_cmdlist);
2932
2933 add_prefix_cmd ("btrace", class_maintenance, maint_btrace_show_cmd, _("\
2934 Show branch tracing specific variables."),
2935 &maint_btrace_show_cmdlist, "maintenance show btrace ",
2936 0, &maintenance_show_cmdlist);
2937
2938 add_prefix_cmd ("pt", class_maintenance, maint_btrace_pt_show_cmd, _("\
2939 Show Intel(R) Processor Trace specific variables."),
2940 &maint_btrace_pt_show_cmdlist, "maintenance show btrace pt ",
2941 0, &maint_btrace_show_cmdlist);
2942
2943 add_setshow_boolean_cmd ("skip-pad", class_maintenance,
2944 &maint_btrace_pt_skip_pad, _("\
2945 Set whether PAD packets should be skipped in the btrace packet history."), _("\
2946 Show whether PAD packets should be skipped in the btrace packet history."),_("\
2947 When enabled, PAD packets are ignored in the btrace packet history."),
2948 NULL, show_maint_btrace_pt_skip_pad,
2949 &maint_btrace_pt_set_cmdlist,
2950 &maint_btrace_pt_show_cmdlist);
2951
2952 add_cmd ("packet-history", class_maintenance, maint_btrace_packet_history_cmd,
2953 _("Print the raw branch tracing data.\n\
2954 With no argument, print ten more packets after the previous ten-line print.\n\
2955 With '-' as argument, print ten packets before a previous ten-line print.\n\
2956 One argument specifies the starting packet of a ten-line print.\n\
2957 Two arguments, separated by a comma, specify the starting and ending packets \
2958 to print.\n\
2959 When preceded by '+' or '-', the second argument specifies the distance \
2960 from the first.\n"),
2961 &maint_btrace_cmdlist);
2962
2963 add_cmd ("clear-packet-history", class_maintenance,
2964 maint_btrace_clear_packet_history_cmd,
2965 _("Clears the branch tracing packet history.\n\
2966 Discards the raw branch tracing data but not the execution history data.\n\
2967 "),
2968 &maint_btrace_cmdlist);
2969
2970 add_cmd ("clear", class_maintenance, maint_btrace_clear_cmd,
2971 _("Clears the branch tracing data.\n\
2972 Discards the raw branch tracing data and the execution history data.\n\
2973 The next 'record' command will fetch the branch tracing data anew.\n\
2974 "),
2975 &maint_btrace_cmdlist);
2976
2977 }