1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
31 #include "filenames.h"
32 #include "xml-support.h"
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

/* Print an ftrace debug status message, prefixed with "[ftrace]".  */

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
49 /* Return the function name of a recorded function segment for printing.
50 This function never returns NULL. */
53 ftrace_print_function_name (const struct btrace_function
*bfun
)
55 struct minimal_symbol
*msym
;
62 return SYMBOL_PRINT_NAME (sym
);
65 return MSYMBOL_PRINT_NAME (msym
);
70 /* Return the file name of a recorded function segment for printing.
71 This function never returns NULL. */
74 ftrace_print_filename (const struct btrace_function
*bfun
)
82 filename
= symtab_to_filename_for_display (symbol_symtab (sym
));
84 filename
= "<unknown>";
89 /* Return a string representation of the address of an instruction.
90 This function never returns NULL. */
93 ftrace_print_insn_addr (const struct btrace_insn
*insn
)
98 return core_addr_to_string_nz (insn
->pc
);
101 /* Print an ftrace debug status message. */
104 ftrace_debug (const struct btrace_function
*bfun
, const char *prefix
)
106 const char *fun
, *file
;
107 unsigned int ibegin
, iend
;
110 fun
= ftrace_print_function_name (bfun
);
111 file
= ftrace_print_filename (bfun
);
114 ibegin
= bfun
->insn_offset
;
115 iend
= ibegin
+ VEC_length (btrace_insn_s
, bfun
->insn
);
117 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
118 prefix
, fun
, file
, level
, ibegin
, iend
);
121 /* Return non-zero if BFUN does not match MFUN and FUN,
122 return zero otherwise. */
125 ftrace_function_switched (const struct btrace_function
*bfun
,
126 const struct minimal_symbol
*mfun
,
127 const struct symbol
*fun
)
129 struct minimal_symbol
*msym
;
135 /* If the minimal symbol changed, we certainly switched functions. */
136 if (mfun
!= NULL
&& msym
!= NULL
137 && strcmp (MSYMBOL_LINKAGE_NAME (mfun
), MSYMBOL_LINKAGE_NAME (msym
)) != 0)
140 /* If the symbol changed, we certainly switched functions. */
141 if (fun
!= NULL
&& sym
!= NULL
)
143 const char *bfname
, *fname
;
145 /* Check the function name. */
146 if (strcmp (SYMBOL_LINKAGE_NAME (fun
), SYMBOL_LINKAGE_NAME (sym
)) != 0)
149 /* Check the location of those functions, as well. */
150 bfname
= symtab_to_fullname (symbol_symtab (sym
));
151 fname
= symtab_to_fullname (symbol_symtab (fun
));
152 if (filename_cmp (fname
, bfname
) != 0)
156 /* If we lost symbol information, we switched functions. */
157 if (!(msym
== NULL
&& sym
== NULL
) && mfun
== NULL
&& fun
== NULL
)
160 /* If we gained symbol information, we switched functions. */
161 if (msym
== NULL
&& sym
== NULL
&& !(mfun
== NULL
&& fun
== NULL
))
167 /* Allocate and initialize a new branch trace function segment.
168 PREV is the chronologically preceding function segment.
169 MFUN and FUN are the symbol information we have for this function. */
171 static struct btrace_function
*
172 ftrace_new_function (struct btrace_function
*prev
,
173 struct minimal_symbol
*mfun
,
176 struct btrace_function
*bfun
;
178 bfun
= xzalloc (sizeof (*bfun
));
182 bfun
->flow
.prev
= prev
;
186 /* Start counting at one. */
188 bfun
->insn_offset
= 1;
192 gdb_assert (prev
->flow
.next
== NULL
);
193 prev
->flow
.next
= bfun
;
195 bfun
->number
= prev
->number
+ 1;
196 bfun
->insn_offset
= (prev
->insn_offset
197 + VEC_length (btrace_insn_s
, prev
->insn
));
198 bfun
->level
= prev
->level
;
204 /* Update the UP field of a function segment. */
207 ftrace_update_caller (struct btrace_function
*bfun
,
208 struct btrace_function
*caller
,
209 enum btrace_function_flag flags
)
211 if (bfun
->up
!= NULL
)
212 ftrace_debug (bfun
, "updating caller");
217 ftrace_debug (bfun
, "set caller");
220 /* Fix up the caller for all segments of a function. */
223 ftrace_fixup_caller (struct btrace_function
*bfun
,
224 struct btrace_function
*caller
,
225 enum btrace_function_flag flags
)
227 struct btrace_function
*prev
, *next
;
229 ftrace_update_caller (bfun
, caller
, flags
);
231 /* Update all function segments belonging to the same function. */
232 for (prev
= bfun
->segment
.prev
; prev
!= NULL
; prev
= prev
->segment
.prev
)
233 ftrace_update_caller (prev
, caller
, flags
);
235 for (next
= bfun
->segment
.next
; next
!= NULL
; next
= next
->segment
.next
)
236 ftrace_update_caller (next
, caller
, flags
);
239 /* Add a new function segment for a call.
240 CALLER is the chronologically preceding function segment.
241 MFUN and FUN are the symbol information we have for this function. */
243 static struct btrace_function
*
244 ftrace_new_call (struct btrace_function
*caller
,
245 struct minimal_symbol
*mfun
,
248 struct btrace_function
*bfun
;
250 bfun
= ftrace_new_function (caller
, mfun
, fun
);
254 ftrace_debug (bfun
, "new call");
259 /* Add a new function segment for a tail call.
260 CALLER is the chronologically preceding function segment.
261 MFUN and FUN are the symbol information we have for this function. */
263 static struct btrace_function
*
264 ftrace_new_tailcall (struct btrace_function
*caller
,
265 struct minimal_symbol
*mfun
,
268 struct btrace_function
*bfun
;
270 bfun
= ftrace_new_function (caller
, mfun
, fun
);
273 bfun
->flags
|= BFUN_UP_LINKS_TO_TAILCALL
;
275 ftrace_debug (bfun
, "new tail call");
280 /* Find the innermost caller in the back trace of BFUN with MFUN/FUN
281 symbol information. */
283 static struct btrace_function
*
284 ftrace_find_caller (struct btrace_function
*bfun
,
285 struct minimal_symbol
*mfun
,
288 for (; bfun
!= NULL
; bfun
= bfun
->up
)
290 /* Skip functions with incompatible symbol information. */
291 if (ftrace_function_switched (bfun
, mfun
, fun
))
294 /* This is the function segment we're looking for. */
301 /* Find the innermost caller in the back trace of BFUN, skipping all
302 function segments that do not end with a call instruction (e.g.
303 tail calls ending with a jump). */
305 static struct btrace_function
*
306 ftrace_find_call (struct btrace_function
*bfun
)
308 for (; bfun
!= NULL
; bfun
= bfun
->up
)
310 struct btrace_insn
*last
;
313 if (bfun
->errcode
!= 0)
316 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
318 if (last
->iclass
== BTRACE_INSN_CALL
)
325 /* Add a continuation segment for a function into which we return.
326 PREV is the chronologically preceding function segment.
327 MFUN and FUN are the symbol information we have for this function. */
329 static struct btrace_function
*
330 ftrace_new_return (struct btrace_function
*prev
,
331 struct minimal_symbol
*mfun
,
334 struct btrace_function
*bfun
, *caller
;
336 bfun
= ftrace_new_function (prev
, mfun
, fun
);
338 /* It is important to start at PREV's caller. Otherwise, we might find
339 PREV itself, if PREV is a recursive function. */
340 caller
= ftrace_find_caller (prev
->up
, mfun
, fun
);
343 /* The caller of PREV is the preceding btrace function segment in this
344 function instance. */
345 gdb_assert (caller
->segment
.next
== NULL
);
347 caller
->segment
.next
= bfun
;
348 bfun
->segment
.prev
= caller
;
350 /* Maintain the function level. */
351 bfun
->level
= caller
->level
;
353 /* Maintain the call stack. */
354 bfun
->up
= caller
->up
;
355 bfun
->flags
= caller
->flags
;
357 ftrace_debug (bfun
, "new return");
361 /* We did not find a caller. This could mean that something went
362 wrong or that the call is simply not included in the trace. */
364 /* Let's search for some actual call. */
365 caller
= ftrace_find_call (prev
->up
);
368 /* There is no call in PREV's back trace. We assume that the
369 branch trace did not include it. */
371 /* Let's find the topmost call function - this skips tail calls. */
372 while (prev
->up
!= NULL
)
375 /* We maintain levels for a series of returns for which we have
377 We start at the preceding function's level in case this has
378 already been a return for which we have not seen the call.
379 We start at level 0 otherwise, to handle tail calls correctly. */
380 bfun
->level
= min (0, prev
->level
) - 1;
382 /* Fix up the call stack for PREV. */
383 ftrace_fixup_caller (prev
, bfun
, BFUN_UP_LINKS_TO_RET
);
385 ftrace_debug (bfun
, "new return - no caller");
389 /* There is a call in PREV's back trace to which we should have
390 returned. Let's remain at this level. */
391 bfun
->level
= prev
->level
;
393 ftrace_debug (bfun
, "new return - unknown caller");
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}
420 /* Add a new function segment for a gap in the trace due to a decode error.
421 PREV is the chronologically preceding function segment.
422 ERRCODE is the format-specific error code. */
424 static struct btrace_function
*
425 ftrace_new_gap (struct btrace_function
*prev
, int errcode
)
427 struct btrace_function
*bfun
;
429 /* We hijack prev if it was empty. */
430 if (prev
!= NULL
&& prev
->errcode
== 0
431 && VEC_empty (btrace_insn_s
, prev
->insn
))
434 bfun
= ftrace_new_function (prev
, NULL
, NULL
);
436 bfun
->errcode
= errcode
;
438 ftrace_debug (bfun
, "new gap");
443 /* Update BFUN with respect to the instruction at PC. This may create new
445 Return the chronologically latest function segment, never NULL. */
447 static struct btrace_function
*
448 ftrace_update_function (struct btrace_function
*bfun
, CORE_ADDR pc
)
450 struct bound_minimal_symbol bmfun
;
451 struct minimal_symbol
*mfun
;
453 struct btrace_insn
*last
;
455 /* Try to determine the function we're in. We use both types of symbols
456 to avoid surprises when we sometimes get a full symbol and sometimes
457 only a minimal symbol. */
458 fun
= find_pc_function (pc
);
459 bmfun
= lookup_minimal_symbol_by_pc (pc
);
462 if (fun
== NULL
&& mfun
== NULL
)
463 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc
));
465 /* If we didn't have a function or if we had a gap before, we create one. */
466 if (bfun
== NULL
|| bfun
->errcode
!= 0)
467 return ftrace_new_function (bfun
, mfun
, fun
);
469 /* Check the last instruction, if we have one.
470 We do this check first, since it allows us to fill in the call stack
471 links in addition to the normal flow links. */
473 if (!VEC_empty (btrace_insn_s
, bfun
->insn
))
474 last
= VEC_last (btrace_insn_s
, bfun
->insn
);
478 switch (last
->iclass
)
480 case BTRACE_INSN_RETURN
:
484 /* On some systems, _dl_runtime_resolve returns to the resolved
485 function instead of jumping to it. From our perspective,
486 however, this is a tailcall.
487 If we treated it as return, we wouldn't be able to find the
488 resolved function in our stack back trace. Hence, we would
489 lose the current stack back trace and start anew with an empty
490 back trace. When the resolved function returns, we would then
491 create a stack back trace with the same function names but
492 different frame id's. This will confuse stepping. */
493 fname
= ftrace_print_function_name (bfun
);
494 if (strcmp (fname
, "_dl_runtime_resolve") == 0)
495 return ftrace_new_tailcall (bfun
, mfun
, fun
);
497 return ftrace_new_return (bfun
, mfun
, fun
);
500 case BTRACE_INSN_CALL
:
501 /* Ignore calls to the next instruction. They are used for PIC. */
502 if (last
->pc
+ last
->size
== pc
)
505 return ftrace_new_call (bfun
, mfun
, fun
);
507 case BTRACE_INSN_JUMP
:
511 start
= get_pc_function_start (pc
);
513 /* If we can't determine the function for PC, we treat a jump at
514 the end of the block as tail call. */
515 if (start
== 0 || start
== pc
)
516 return ftrace_new_tailcall (bfun
, mfun
, fun
);
521 /* Check if we're switching functions for some other reason. */
522 if (ftrace_function_switched (bfun
, mfun
, fun
))
524 DEBUG_FTRACE ("switching from %s in %s at %s",
525 ftrace_print_insn_addr (last
),
526 ftrace_print_function_name (bfun
),
527 ftrace_print_filename (bfun
));
529 return ftrace_new_switch (bfun
, mfun
, fun
);
535 /* Add the instruction at PC to BFUN's instructions. */
538 ftrace_update_insns (struct btrace_function
*bfun
,
539 const struct btrace_insn
*insn
)
541 VEC_safe_push (btrace_insn_s
, bfun
->insn
, insn
);
543 if (record_debug
> 1)
544 ftrace_debug (bfun
, "update insn");
547 /* Classify the instruction at PC. */
549 static enum btrace_insn_class
550 ftrace_classify_insn (struct gdbarch
*gdbarch
, CORE_ADDR pc
)
552 volatile struct gdb_exception error
;
553 enum btrace_insn_class iclass
;
555 iclass
= BTRACE_INSN_OTHER
;
556 TRY_CATCH (error
, RETURN_MASK_ERROR
)
558 if (gdbarch_insn_is_call (gdbarch
, pc
))
559 iclass
= BTRACE_INSN_CALL
;
560 else if (gdbarch_insn_is_ret (gdbarch
, pc
))
561 iclass
= BTRACE_INSN_RETURN
;
562 else if (gdbarch_insn_is_jump (gdbarch
, pc
))
563 iclass
= BTRACE_INSN_JUMP
;
569 /* Compute the function branch trace from BTS trace. */
572 btrace_compute_ftrace_bts (struct thread_info
*tp
,
573 const struct btrace_data_bts
*btrace
)
575 struct btrace_thread_info
*btinfo
;
576 struct btrace_function
*begin
, *end
;
577 struct gdbarch
*gdbarch
;
578 unsigned int blk
, ngaps
;
581 gdbarch
= target_gdbarch ();
582 btinfo
= &tp
->btrace
;
583 begin
= btinfo
->begin
;
585 ngaps
= btinfo
->ngaps
;
586 level
= begin
!= NULL
? -btinfo
->level
: INT_MAX
;
587 blk
= VEC_length (btrace_block_s
, btrace
->blocks
);
591 btrace_block_s
*block
;
596 block
= VEC_index (btrace_block_s
, btrace
->blocks
, blk
);
601 volatile struct gdb_exception error
;
602 struct btrace_insn insn
;
605 /* We should hit the end of the block. Warn if we went too far. */
608 /* Indicate the gap in the trace - unless we're at the
612 warning (_("Recorded trace may be corrupted around %s."),
613 core_addr_to_string_nz (pc
));
615 end
= ftrace_new_gap (end
, BDE_BTS_OVERFLOW
);
621 end
= ftrace_update_function (end
, pc
);
625 /* Maintain the function level offset.
626 For all but the last block, we do it here. */
628 level
= min (level
, end
->level
);
631 TRY_CATCH (error
, RETURN_MASK_ERROR
)
632 size
= gdb_insn_length (gdbarch
, pc
);
636 insn
.iclass
= ftrace_classify_insn (gdbarch
, pc
);
638 ftrace_update_insns (end
, &insn
);
640 /* We're done once we pushed the instruction at the end. */
641 if (block
->end
== pc
)
644 /* We can't continue if we fail to compute the size. */
647 warning (_("Recorded trace may be incomplete around %s."),
648 core_addr_to_string_nz (pc
));
650 /* Indicate the gap in the trace. We just added INSN so we're
651 not at the beginning. */
652 end
= ftrace_new_gap (end
, BDE_BTS_INSN_SIZE
);
660 /* Maintain the function level offset.
661 For the last block, we do it here to not consider the last
663 Since the last instruction corresponds to the current instruction
664 and is not really part of the execution history, it shouldn't
667 level
= min (level
, end
->level
);
671 btinfo
->begin
= begin
;
673 btinfo
->ngaps
= ngaps
;
675 /* LEVEL is the minimal function level of all btrace function segments.
676 Define the global level offset to -LEVEL so all function levels are
677 normalized to start at zero. */
678 btinfo
->level
= -level
;
681 /* Compute the function branch trace from a block branch trace BTRACE for
682 a thread given by BTINFO. */
685 btrace_compute_ftrace (struct thread_info
*tp
, struct btrace_data
*btrace
)
687 DEBUG ("compute ftrace");
689 switch (btrace
->format
)
691 case BTRACE_FORMAT_NONE
:
694 case BTRACE_FORMAT_BTS
:
695 btrace_compute_ftrace_bts (tp
, &btrace
->variant
.bts
);
699 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
702 /* Add an entry for the current PC. */
705 btrace_add_pc (struct thread_info
*tp
)
707 struct btrace_data btrace
;
708 struct btrace_block
*block
;
709 struct regcache
*regcache
;
710 struct cleanup
*cleanup
;
713 regcache
= get_thread_regcache (tp
->ptid
);
714 pc
= regcache_read_pc (regcache
);
716 btrace_data_init (&btrace
);
717 btrace
.format
= BTRACE_FORMAT_BTS
;
718 btrace
.variant
.bts
.blocks
= NULL
;
720 cleanup
= make_cleanup_btrace_data (&btrace
);
722 block
= VEC_safe_push (btrace_block_s
, btrace
.variant
.bts
.blocks
, NULL
);
726 btrace_compute_ftrace (tp
, &btrace
);
728 do_cleanups (cleanup
);
734 btrace_enable (struct thread_info
*tp
, const struct btrace_config
*conf
)
736 if (tp
->btrace
.target
!= NULL
)
739 if (!target_supports_btrace (conf
->format
))
740 error (_("Target does not support branch tracing."));
742 DEBUG ("enable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
744 tp
->btrace
.target
= target_enable_btrace (tp
->ptid
, conf
);
746 /* Add an entry for the current PC so we start tracing from where we
748 if (tp
->btrace
.target
!= NULL
)
754 const struct btrace_config
*
755 btrace_conf (const struct btrace_thread_info
*btinfo
)
757 if (btinfo
->target
== NULL
)
760 return target_btrace_conf (btinfo
->target
);
766 btrace_disable (struct thread_info
*tp
)
768 struct btrace_thread_info
*btp
= &tp
->btrace
;
771 if (btp
->target
== NULL
)
774 DEBUG ("disable thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
776 target_disable_btrace (btp
->target
);
785 btrace_teardown (struct thread_info
*tp
)
787 struct btrace_thread_info
*btp
= &tp
->btrace
;
790 if (btp
->target
== NULL
)
793 DEBUG ("teardown thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
795 target_teardown_btrace (btp
->target
);
801 /* Stitch branch trace in BTS format. */
804 btrace_stitch_bts (struct btrace_data_bts
*btrace
, struct thread_info
*tp
)
806 struct btrace_thread_info
*btinfo
;
807 struct btrace_function
*last_bfun
;
808 struct btrace_insn
*last_insn
;
809 btrace_block_s
*first_new_block
;
811 btinfo
= &tp
->btrace
;
812 last_bfun
= btinfo
->end
;
813 gdb_assert (last_bfun
!= NULL
);
814 gdb_assert (!VEC_empty (btrace_block_s
, btrace
->blocks
));
816 /* If the existing trace ends with a gap, we just glue the traces
817 together. We need to drop the last (i.e. chronologically first) block
818 of the new trace, though, since we can't fill in the start address.*/
819 if (VEC_empty (btrace_insn_s
, last_bfun
->insn
))
821 VEC_pop (btrace_block_s
, btrace
->blocks
);
825 /* Beware that block trace starts with the most recent block, so the
826 chronologically first block in the new trace is the last block in
827 the new trace's block vector. */
828 first_new_block
= VEC_last (btrace_block_s
, btrace
->blocks
);
829 last_insn
= VEC_last (btrace_insn_s
, last_bfun
->insn
);
831 /* If the current PC at the end of the block is the same as in our current
832 trace, there are two explanations:
833 1. we executed the instruction and some branch brought us back.
834 2. we have not made any progress.
835 In the first case, the delta trace vector should contain at least two
837 In the second case, the delta trace vector should contain exactly one
838 entry for the partial block containing the current PC. Remove it. */
839 if (first_new_block
->end
== last_insn
->pc
840 && VEC_length (btrace_block_s
, btrace
->blocks
) == 1)
842 VEC_pop (btrace_block_s
, btrace
->blocks
);
846 DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn
),
847 core_addr_to_string_nz (first_new_block
->end
));
849 /* Do a simple sanity check to make sure we don't accidentally end up
850 with a bad block. This should not occur in practice. */
851 if (first_new_block
->end
< last_insn
->pc
)
853 warning (_("Error while trying to read delta trace. Falling back to "
858 /* We adjust the last block to start at the end of our current trace. */
859 gdb_assert (first_new_block
->begin
== 0);
860 first_new_block
->begin
= last_insn
->pc
;
862 /* We simply pop the last insn so we can insert it again as part of
863 the normal branch trace computation.
864 Since instruction iterators are based on indices in the instructions
865 vector, we don't leave any pointers dangling. */
866 DEBUG ("pruning insn at %s for stitching",
867 ftrace_print_insn_addr (last_insn
));
869 VEC_pop (btrace_insn_s
, last_bfun
->insn
);
871 /* The instructions vector may become empty temporarily if this has
872 been the only instruction in this function segment.
873 This violates the invariant but will be remedied shortly by
874 btrace_compute_ftrace when we add the new trace. */
876 /* The only case where this would hurt is if the entire trace consisted
877 of just that one instruction. If we remove it, we might turn the now
878 empty btrace function segment into a gap. But we don't want gaps at
879 the beginning. To avoid this, we remove the entire old trace. */
880 if (last_bfun
== btinfo
->begin
&& VEC_empty (btrace_insn_s
, last_bfun
->insn
))
886 /* Adjust the block trace in order to stitch old and new trace together.
887 BTRACE is the new delta trace between the last and the current stop.
888 TP is the traced thread.
889 May modifx BTRACE as well as the existing trace in TP.
890 Return 0 on success, -1 otherwise. */
893 btrace_stitch_trace (struct btrace_data
*btrace
, struct thread_info
*tp
)
895 /* If we don't have trace, there's nothing to do. */
896 if (btrace_data_empty (btrace
))
899 switch (btrace
->format
)
901 case BTRACE_FORMAT_NONE
:
904 case BTRACE_FORMAT_BTS
:
905 return btrace_stitch_bts (&btrace
->variant
.bts
, tp
);
908 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
911 /* Clear the branch trace histories in BTINFO. */
914 btrace_clear_history (struct btrace_thread_info
*btinfo
)
916 xfree (btinfo
->insn_history
);
917 xfree (btinfo
->call_history
);
918 xfree (btinfo
->replay
);
920 btinfo
->insn_history
= NULL
;
921 btinfo
->call_history
= NULL
;
922 btinfo
->replay
= NULL
;
928 btrace_fetch (struct thread_info
*tp
)
930 struct btrace_thread_info
*btinfo
;
931 struct btrace_target_info
*tinfo
;
932 struct btrace_data btrace
;
933 struct cleanup
*cleanup
;
936 DEBUG ("fetch thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
938 btinfo
= &tp
->btrace
;
939 tinfo
= btinfo
->target
;
943 /* There's no way we could get new trace while replaying.
944 On the other hand, delta trace would return a partial record with the
945 current PC, which is the replay PC, not the last PC, as expected. */
946 if (btinfo
->replay
!= NULL
)
949 btrace_data_init (&btrace
);
950 cleanup
= make_cleanup_btrace_data (&btrace
);
952 /* Let's first try to extend the trace we already have. */
953 if (btinfo
->end
!= NULL
)
955 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_DELTA
);
958 /* Success. Let's try to stitch the traces together. */
959 errcode
= btrace_stitch_trace (&btrace
, tp
);
963 /* We failed to read delta trace. Let's try to read new trace. */
964 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_NEW
);
966 /* If we got any new trace, discard what we have. */
967 if (errcode
== 0 && !btrace_data_empty (&btrace
))
971 /* If we were not able to read the trace, we start over. */
975 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
979 errcode
= target_read_btrace (&btrace
, tinfo
, BTRACE_READ_ALL
);
981 /* If we were not able to read the branch trace, signal an error. */
983 error (_("Failed to read branch trace."));
985 /* Compute the trace, provided we have any. */
986 if (!btrace_data_empty (&btrace
))
988 btrace_clear_history (btinfo
);
989 btrace_compute_ftrace (tp
, &btrace
);
992 do_cleanups (cleanup
);
998 btrace_clear (struct thread_info
*tp
)
1000 struct btrace_thread_info
*btinfo
;
1001 struct btrace_function
*it
, *trash
;
1003 DEBUG ("clear thread %d (%s)", tp
->num
, target_pid_to_str (tp
->ptid
));
1005 /* Make sure btrace frames that may hold a pointer into the branch
1006 trace data are destroyed. */
1007 reinit_frame_cache ();
1009 btinfo
= &tp
->btrace
;
1020 btinfo
->begin
= NULL
;
1024 btrace_clear_history (btinfo
);
1030 btrace_free_objfile (struct objfile
*objfile
)
1032 struct thread_info
*tp
;
1034 DEBUG ("free objfile");
1036 ALL_NON_EXITED_THREADS (tp
)
1040 #if defined (HAVE_LIBEXPAT)
1042 /* Check the btrace document version. */
1045 check_xml_btrace_version (struct gdb_xml_parser
*parser
,
1046 const struct gdb_xml_element
*element
,
1047 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1049 const char *version
= xml_find_attribute (attributes
, "version")->value
;
1051 if (strcmp (version
, "1.0") != 0)
1052 gdb_xml_error (parser
, _("Unsupported btrace version: \"%s\""), version
);
1055 /* Parse a btrace "block" xml record. */
1058 parse_xml_btrace_block (struct gdb_xml_parser
*parser
,
1059 const struct gdb_xml_element
*element
,
1060 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1062 struct btrace_data
*btrace
;
1063 struct btrace_block
*block
;
1064 ULONGEST
*begin
, *end
;
1068 switch (btrace
->format
)
1070 case BTRACE_FORMAT_BTS
:
1073 case BTRACE_FORMAT_NONE
:
1074 btrace
->format
= BTRACE_FORMAT_BTS
;
1075 btrace
->variant
.bts
.blocks
= NULL
;
1079 gdb_xml_error (parser
, _("Btrace format error."));
1082 begin
= xml_find_attribute (attributes
, "begin")->value
;
1083 end
= xml_find_attribute (attributes
, "end")->value
;
1085 block
= VEC_safe_push (btrace_block_s
, btrace
->variant
.bts
.blocks
, NULL
);
1086 block
->begin
= *begin
;
1090 static const struct gdb_xml_attribute block_attributes
[] = {
1091 { "begin", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1092 { "end", GDB_XML_AF_NONE
, gdb_xml_parse_attr_ulongest
, NULL
},
1093 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1096 static const struct gdb_xml_attribute btrace_attributes
[] = {
1097 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1098 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1101 static const struct gdb_xml_element btrace_children
[] = {
1102 { "block", block_attributes
, NULL
,
1103 GDB_XML_EF_REPEATABLE
| GDB_XML_EF_OPTIONAL
, parse_xml_btrace_block
, NULL
},
1104 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1107 static const struct gdb_xml_element btrace_elements
[] = {
1108 { "btrace", btrace_attributes
, btrace_children
, GDB_XML_EF_NONE
,
1109 check_xml_btrace_version
, NULL
},
1110 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1113 #endif /* defined (HAVE_LIBEXPAT) */
/* See btrace.h.  Parse BUFFER (a "btrace" XML document) into BTRACE.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1143 #if defined (HAVE_LIBEXPAT)
1145 /* Parse a btrace-conf "bts" xml record. */
1148 parse_xml_btrace_conf_bts (struct gdb_xml_parser
*parser
,
1149 const struct gdb_xml_element
*element
,
1150 void *user_data
, VEC (gdb_xml_value_s
) *attributes
)
1152 struct btrace_config
*conf
;
1153 struct gdb_xml_value
*size
;
1156 conf
->format
= BTRACE_FORMAT_BTS
;
1159 size
= xml_find_attribute (attributes
, "size");
1161 conf
->bts
.size
= (unsigned int) * (ULONGEST
*) size
->value
;
1164 static const struct gdb_xml_attribute btrace_conf_bts_attributes
[] = {
1165 { "size", GDB_XML_AF_OPTIONAL
, gdb_xml_parse_attr_ulongest
, NULL
},
1166 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1169 static const struct gdb_xml_element btrace_conf_children
[] = {
1170 { "bts", btrace_conf_bts_attributes
, NULL
, GDB_XML_EF_OPTIONAL
,
1171 parse_xml_btrace_conf_bts
, NULL
},
1172 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1175 static const struct gdb_xml_attribute btrace_conf_attributes
[] = {
1176 { "version", GDB_XML_AF_NONE
, NULL
, NULL
},
1177 { NULL
, GDB_XML_AF_NONE
, NULL
, NULL
}
1180 static const struct gdb_xml_element btrace_conf_elements
[] = {
1181 { "btrace-conf", btrace_conf_attributes
, btrace_conf_children
,
1182 GDB_XML_EF_NONE
, NULL
, NULL
},
1183 { NULL
, NULL
, NULL
, GDB_XML_EF_NONE
, NULL
, NULL
}
1186 #endif /* defined (HAVE_LIBEXPAT) */
/* See btrace.h.  Parse XML (a "btrace-conf" document) into CONF.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1211 const struct btrace_insn
*
1212 btrace_insn_get (const struct btrace_insn_iterator
*it
)
1214 const struct btrace_function
*bfun
;
1215 unsigned int index
, end
;
1218 bfun
= it
->function
;
1220 /* Check if the iterator points to a gap in the trace. */
1221 if (bfun
->errcode
!= 0)
1224 /* The index is within the bounds of this function's instruction vector. */
1225 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1226 gdb_assert (0 < end
);
1227 gdb_assert (index
< end
);
1229 return VEC_index (btrace_insn_s
, bfun
->insn
, index
);
1235 btrace_insn_number (const struct btrace_insn_iterator
*it
)
1237 const struct btrace_function
*bfun
;
1239 bfun
= it
->function
;
1241 /* Return zero if the iterator points to a gap in the trace. */
1242 if (bfun
->errcode
!= 0)
1245 return bfun
->insn_offset
+ it
->index
;
1251 btrace_insn_begin (struct btrace_insn_iterator
*it
,
1252 const struct btrace_thread_info
*btinfo
)
1254 const struct btrace_function
*bfun
;
1256 bfun
= btinfo
->begin
;
1258 error (_("No trace."));
1260 it
->function
= bfun
;
1267 btrace_insn_end (struct btrace_insn_iterator
*it
,
1268 const struct btrace_thread_info
*btinfo
)
1270 const struct btrace_function
*bfun
;
1271 unsigned int length
;
1275 error (_("No trace."));
1277 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1279 /* The last function may either be a gap or it contains the current
1280 instruction, which is one past the end of the execution trace; ignore
1285 it
->function
= bfun
;
/* NOTE(review): this region of the file is garbled — statements are split
   across lines and several original lines are missing entirely (the
   initialization of `index'/`steps', the stepping loop header, the body of
   the gap handling, and the end-of-trace adjustment).  Restore this
   function from version control before building.  */
/* Advances the instruction iterator IT forward by at most STRIDE
   instructions; presumably returns the number of instructions actually
   stepped — TODO confirm against the original source.  */
1292 btrace_insn_next (struct btrace_insn_iterator
*it
, unsigned int stride
)
1294 const struct btrace_function
*bfun
;
1295 unsigned int index
, steps
;
/* Start from the function segment the iterator currently points at.  */
1297 bfun
= it
->function
;
1303 unsigned int end
, space
, adv
;
/* Number of instructions recorded in the current segment.  */
1305 end
= VEC_length (btrace_insn_s
, bfun
->insn
);
1307 /* An empty function segment represents a gap in the trace. We count
1308 it as one instruction. */
1311 const struct btrace_function
*next
;
/* Follow the control-flow chain to the next function segment.  */
1313 next
= bfun
->flow
.next
;
/* Sanity checks: the segment is non-empty and the index is in range.  */
1326 gdb_assert (0 < end
);
1327 gdb_assert (index
< end
);
1329 /* Compute the number of instructions remaining in this segment. */
1330 space
= end
- index
;
1332 /* Advance the iterator as far as possible within this segment. */
1333 adv
= min (space
, stride
);
1338 /* Move to the next function if we're at the end of this one. */
1341 const struct btrace_function
*next
;
1343 next
= bfun
->flow
.next
;
1346 /* We stepped past the last function.
1348 Let's adjust the index to point to the last instruction in
1349 the previous function. */
1355 /* We now point to the first instruction in the new function. */
1360 /* We did make progress. */
1361 gdb_assert (adv
> 0);
1364 /* Update the iterator. */
1365 it
->function
= bfun
;
/* NOTE(review): this region of the file is garbled — statements are split
   across lines and several original lines are missing (the stepping loop
   header, the declaration of `adv', the empty-segment/gap handling, and
   the index/steps bookkeeping).  Restore this function from version
   control before building.  */
/* Moves the instruction iterator IT backward by at most STRIDE
   instructions; presumably returns the number of instructions actually
   stepped — TODO confirm against the original source.  */
1374 btrace_insn_prev (struct btrace_insn_iterator
*it
, unsigned int stride
)
1376 const struct btrace_function
*bfun
;
1377 unsigned int index
, steps
;
/* Start from the function segment the iterator currently points at.  */
1379 bfun
= it
->function
;
1387 /* Move to the previous function if we're at the start of this one. */
1390 const struct btrace_function
*prev
;
/* Follow the control-flow chain to the preceding function segment.  */
1392 prev
= bfun
->flow
.prev
;
1396 /* We point to one after the last instruction in the new function. */
1398 index
= VEC_length (btrace_insn_s
, bfun
->insn
);
1400 /* An empty function segment represents a gap in the trace. We count
1401 it as one instruction. */
1411 /* Advance the iterator as far as possible within this segment. */
1412 adv
= min (index
, stride
);
1418 /* We did make progress. */
1419 gdb_assert (adv
> 0);
1422 /* Update the iterator. */
1423 it
->function
= bfun
;
1432 btrace_insn_cmp (const struct btrace_insn_iterator
*lhs
,
1433 const struct btrace_insn_iterator
*rhs
)
1435 unsigned int lnum
, rnum
;
1437 lnum
= btrace_insn_number (lhs
);
1438 rnum
= btrace_insn_number (rhs
);
1440 /* A gap has an instruction number of zero. Things are getting more
1441 complicated if gaps are involved.
1443 We take the instruction number offset from the iterator's function.
1444 This is the number of the first instruction after the gap.
1446 This is OK as long as both lhs and rhs point to gaps. If only one of
1447 them does, we need to adjust the number based on the other's regular
1448 instruction number. Otherwise, a gap might compare equal to an
1451 if (lnum
== 0 && rnum
== 0)
1453 lnum
= lhs
->function
->insn_offset
;
1454 rnum
= rhs
->function
->insn_offset
;
1458 lnum
= lhs
->function
->insn_offset
;
1465 rnum
= rhs
->function
->insn_offset
;
1471 return (int) (lnum
- rnum
);
1477 btrace_find_insn_by_number (struct btrace_insn_iterator
*it
,
1478 const struct btrace_thread_info
*btinfo
,
1479 unsigned int number
)
1481 const struct btrace_function
*bfun
;
1482 unsigned int end
, length
;
1484 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1487 if (bfun
->errcode
!= 0)
1490 if (bfun
->insn_offset
<= number
)
1497 length
= VEC_length (btrace_insn_s
, bfun
->insn
);
1498 gdb_assert (length
> 0);
1500 end
= bfun
->insn_offset
+ length
;
1504 it
->function
= bfun
;
1505 it
->index
= number
- bfun
->insn_offset
;
1512 const struct btrace_function
*
1513 btrace_call_get (const struct btrace_call_iterator
*it
)
1515 return it
->function
;
1521 btrace_call_number (const struct btrace_call_iterator
*it
)
1523 const struct btrace_thread_info
*btinfo
;
1524 const struct btrace_function
*bfun
;
1527 btinfo
= it
->btinfo
;
1528 bfun
= it
->function
;
1530 return bfun
->number
;
1532 /* For the end iterator, i.e. bfun == NULL, we return one more than the
1533 number of the last function. */
1535 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
1537 /* If the function contains only a single instruction (i.e. the current
1538 instruction), it will be skipped and its number is already the number
1541 return bfun
->number
;
1543 /* Otherwise, return one more than the number of the last function. */
1544 return bfun
->number
+ 1;
1550 btrace_call_begin (struct btrace_call_iterator
*it
,
1551 const struct btrace_thread_info
*btinfo
)
1553 const struct btrace_function
*bfun
;
1555 bfun
= btinfo
->begin
;
1557 error (_("No trace."));
1559 it
->btinfo
= btinfo
;
1560 it
->function
= bfun
;
1566 btrace_call_end (struct btrace_call_iterator
*it
,
1567 const struct btrace_thread_info
*btinfo
)
1569 const struct btrace_function
*bfun
;
1573 error (_("No trace."));
1575 it
->btinfo
= btinfo
;
1576 it
->function
= NULL
;
/* NOTE(review): this region of the file is garbled — statements are split
   across lines and several original lines are missing (the declarations
   and initialization of `steps'/`insns', the single-instruction check, and
   the loop-advance bookkeeping).  Restore this function from version
   control before building.  */
/* Advances the call iterator IT forward by at most STRIDE function
   segments; presumably returns the number of segments actually stepped —
   TODO confirm against the original source.  */
1582 btrace_call_next (struct btrace_call_iterator
*it
, unsigned int stride
)
1584 const struct btrace_function
*bfun
;
/* Start from the function segment the iterator currently points at.  */
1587 bfun
= it
->function
;
/* A NULL segment is the end iterator; stop walking there.  */
1589 while (bfun
!= NULL
)
1591 const struct btrace_function
*next
;
/* Follow the control-flow chain to the next function segment.  */
1594 next
= bfun
->flow
.next
;
1597 /* Ignore the last function if it only contains a single
1598 (i.e. the current) instruction. */
1599 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
/* Stop once we have stepped the requested number of segments.  */
1604 if (stride
== steps
)
1611 it
->function
= bfun
;
/* NOTE(review): this region of the file is garbled — statements are split
   across lines and several original lines are missing (the handling of the
   end iterator, the declarations of `insns'/`steps', and the loop-advance
   bookkeeping).  Restore this function from version control before
   building.  */
/* Moves the call iterator IT backward by at most STRIDE function segments;
   presumably returns the number of segments actually stepped — TODO
   confirm against the original source.  */
1618 btrace_call_prev (struct btrace_call_iterator
*it
, unsigned int stride
)
1620 const struct btrace_thread_info
*btinfo
;
1621 const struct btrace_function
*bfun
;
/* Start from the function segment the iterator currently points at.  */
1624 bfun
= it
->function
;
/* Presumably the end-iterator case: restart from the trace's last
   segment via the iterator's thread info — TODO confirm.  */
1631 btinfo
= it
->btinfo
;
1636 /* Ignore the last function if it only contains a single
1637 (i.e. the current) instruction. */
1638 insns
= VEC_length (btrace_insn_s
, bfun
->insn
);
/* Step to the preceding function segment.  */
1640 bfun
= bfun
->flow
.prev
;
/* Keep stepping backwards until STRIDE segments have been covered.  */
1648 while (steps
< stride
)
1650 const struct btrace_function
*prev
;
1652 prev
= bfun
->flow
.prev
;
1660 it
->function
= bfun
;
/* Compare the positions of two call iterators LHS and RHS.  Returns a
   negative value if LHS precedes RHS, zero if they are equal, and a
   positive value if LHS succeeds RHS.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}
1681 btrace_find_call_by_number (struct btrace_call_iterator
*it
,
1682 const struct btrace_thread_info
*btinfo
,
1683 unsigned int number
)
1685 const struct btrace_function
*bfun
;
1687 for (bfun
= btinfo
->end
; bfun
!= NULL
; bfun
= bfun
->flow
.prev
)
1691 bnum
= bfun
->number
;
1694 it
->btinfo
= btinfo
;
1695 it
->function
= bfun
;
1699 /* Functions are ordered and numbered consecutively. We could bail out
1700 earlier. On the other hand, it is very unlikely that we search for
1701 a nonexistent function. */
1710 btrace_set_insn_history (struct btrace_thread_info
*btinfo
,
1711 const struct btrace_insn_iterator
*begin
,
1712 const struct btrace_insn_iterator
*end
)
1714 if (btinfo
->insn_history
== NULL
)
1715 btinfo
->insn_history
= xzalloc (sizeof (*btinfo
->insn_history
));
1717 btinfo
->insn_history
->begin
= *begin
;
1718 btinfo
->insn_history
->end
= *end
;
1724 btrace_set_call_history (struct btrace_thread_info
*btinfo
,
1725 const struct btrace_call_iterator
*begin
,
1726 const struct btrace_call_iterator
*end
)
1728 gdb_assert (begin
->btinfo
== end
->btinfo
);
1730 if (btinfo
->call_history
== NULL
)
1731 btinfo
->call_history
= xzalloc (sizeof (*btinfo
->call_history
));
1733 btinfo
->call_history
->begin
= *begin
;
1734 btinfo
->call_history
->end
= *end
;
1740 btrace_is_replaying (struct thread_info
*tp
)
1742 return tp
->btrace
.replay
!= NULL
;
1748 btrace_is_empty (struct thread_info
*tp
)
1750 struct btrace_insn_iterator begin
, end
;
1751 struct btrace_thread_info
*btinfo
;
1753 btinfo
= &tp
->btrace
;
1755 if (btinfo
->begin
== NULL
)
1758 btrace_insn_begin (&begin
, btinfo
);
1759 btrace_insn_end (&end
, btinfo
);
1761 return btrace_insn_cmp (&begin
, &end
) == 0;
/* Forward the cleanup request to btrace_data_fini.  ARG is the
   struct btrace_data to finalize, passed as void * by the cleanup
   machinery.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}
1775 make_cleanup_btrace_data (struct btrace_data
*data
)
1777 return make_cleanup (do_btrace_data_cleanup
, data
);