1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "gdbthread.h"
29 #include "cli/cli-utils.h"
33 #include "filenames.h"
35 #include "frame-unwind.h"
38 #include "event-loop.h"
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops
;
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer
*record_btrace_thread_observer
;
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only
[] = "read-only";
50 static const char replay_memory_access_read_write
[] = "read-write";
51 static const char *const replay_memory_access_types
[] =
53 replay_memory_access_read_only
,
54 replay_memory_access_read_write
,
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access
= replay_memory_access_read_only
;
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element
*set_record_btrace_cmdlist
;
63 static struct cmd_list_element
*show_record_btrace_cmdlist
;
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile
;
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf
;
77 /* Command list for "record btrace". */
78 static struct cmd_list_element
*record_btrace_cmdlist
;
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
82 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
86 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
101 /* Update the branch trace for the current thread and return a pointer to its
104 Throws an error if there is no thread or no trace. This function never
107 static struct thread_info
*
108 require_btrace_thread (void)
110 struct thread_info
*tp
;
114 tp
= find_thread_ptid (inferior_ptid
);
116 error (_("No thread."));
120 if (btrace_is_empty (tp
))
121 error (_("No trace."));
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
129 Throws an error if there is no thread or no trace. This function never
132 static struct btrace_thread_info
*
133 require_btrace (void)
135 struct thread_info
*tp
;
137 tp
= require_btrace_thread ();
142 /* Enable branch tracing for one thread. Warn on errors. */
145 record_btrace_enable_warn (struct thread_info
*tp
)
149 btrace_enable (tp
, &record_btrace_conf
);
151 CATCH (error
, RETURN_MASK_ERROR
)
153 warning ("%s", error
.message
);
/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}
168 /* Enable automatic tracing of new threads. */
171 record_btrace_auto_enable (void)
173 DEBUG ("attach thread observer");
175 record_btrace_thread_observer
176 = observer_attach_new_thread (record_btrace_enable_warn
);
179 /* Disable automatic tracing of new threads. */
182 record_btrace_auto_disable (void)
184 /* The observer may have been detached, already. */
185 if (record_btrace_thread_observer
== NULL
)
188 DEBUG ("detach thread observer");
190 observer_detach_new_thread (record_btrace_thread_observer
);
191 record_btrace_thread_observer
= NULL
;
194 /* The record-btrace async event handler function. */
197 record_btrace_handle_async_inferior_event (gdb_client_data data
)
199 inferior_event_handler (INF_REG_EVENT
, NULL
);
202 /* The to_open method of target record-btrace. */
205 record_btrace_open (const char *args
, int from_tty
)
207 struct cleanup
*disable_chain
;
208 struct thread_info
*tp
;
214 if (!target_has_execution
)
215 error (_("The program is not being run."));
217 gdb_assert (record_btrace_thread_observer
== NULL
);
219 disable_chain
= make_cleanup (null_cleanup
, NULL
);
220 ALL_NON_EXITED_THREADS (tp
)
221 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->num
))
223 btrace_enable (tp
, &record_btrace_conf
);
225 make_cleanup (record_btrace_disable_callback
, tp
);
228 record_btrace_auto_enable ();
230 push_target (&record_btrace_ops
);
232 record_btrace_async_inferior_event_handler
233 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
235 record_btrace_generating_corefile
= 0;
237 observer_notify_record_changed (current_inferior (), 1);
239 discard_cleanups (disable_chain
);
242 /* The to_stop_recording method of target record-btrace. */
245 record_btrace_stop_recording (struct target_ops
*self
)
247 struct thread_info
*tp
;
249 DEBUG ("stop recording");
251 record_btrace_auto_disable ();
253 ALL_NON_EXITED_THREADS (tp
)
254 if (tp
->btrace
.target
!= NULL
)
258 /* The to_close method of target record-btrace. */
261 record_btrace_close (struct target_ops
*self
)
263 struct thread_info
*tp
;
265 if (record_btrace_async_inferior_event_handler
!= NULL
)
266 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
268 /* Make sure automatic recording gets disabled even if we did not stop
269 recording before closing the record-btrace target. */
270 record_btrace_auto_disable ();
272 /* We should have already stopped recording.
273 Tear down btrace in case we have not. */
274 ALL_NON_EXITED_THREADS (tp
)
275 btrace_teardown (tp
);
278 /* The to_async method of target record-btrace. */
281 record_btrace_async (struct target_ops
*ops
, int enable
)
284 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
286 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
288 ops
->beneath
->to_async (ops
->beneath
, enable
);
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit, *SIZE is scaled down to
   that unit and the matching suffix ("GB", "MB", "kB") is returned;
   otherwise *SIZE is left untouched and "" is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
319 /* Print a BTS configuration. */
322 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
330 suffix
= record_btrace_adjust_size (&size
);
331 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
335 /* Print an Intel(R) Processor Trace configuration. */
338 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
346 suffix
= record_btrace_adjust_size (&size
);
347 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
351 /* Print a branch tracing configuration. */
354 record_btrace_print_conf (const struct btrace_config
*conf
)
356 printf_unfiltered (_("Recording format: %s.\n"),
357 btrace_format_string (conf
->format
));
359 switch (conf
->format
)
361 case BTRACE_FORMAT_NONE
:
364 case BTRACE_FORMAT_BTS
:
365 record_btrace_print_bts_conf (&conf
->bts
);
368 case BTRACE_FORMAT_PT
:
369 record_btrace_print_pt_conf (&conf
->pt
);
373 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
376 /* The to_info_record method of target record-btrace. */
379 record_btrace_info (struct target_ops
*self
)
381 struct btrace_thread_info
*btinfo
;
382 const struct btrace_config
*conf
;
383 struct thread_info
*tp
;
384 unsigned int insns
, calls
, gaps
;
388 tp
= find_thread_ptid (inferior_ptid
);
390 error (_("No thread."));
392 btinfo
= &tp
->btrace
;
394 conf
= btrace_conf (btinfo
);
396 record_btrace_print_conf (conf
);
404 if (!btrace_is_empty (tp
))
406 struct btrace_call_iterator call
;
407 struct btrace_insn_iterator insn
;
409 btrace_call_end (&call
, btinfo
);
410 btrace_call_prev (&call
, 1);
411 calls
= btrace_call_number (&call
);
413 btrace_insn_end (&insn
, btinfo
);
415 insns
= btrace_insn_number (&insn
);
418 /* The last instruction does not really belong to the trace. */
425 /* Skip gaps at the end. */
428 steps
= btrace_insn_prev (&insn
, 1);
432 insns
= btrace_insn_number (&insn
);
437 gaps
= btinfo
->ngaps
;
440 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
441 "for thread %d (%s).\n"), insns
, calls
, gaps
,
442 tp
->num
, target_pid_to_str (tp
->ptid
));
444 if (btrace_is_replaying (tp
))
445 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
446 btrace_insn_number (btinfo
->replay
));
449 /* Print a decode error. */
452 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
453 enum btrace_format format
)
458 errstr
= _("unknown");
466 case BTRACE_FORMAT_BTS
:
472 case BDE_BTS_OVERFLOW
:
473 errstr
= _("instruction overflow");
476 case BDE_BTS_INSN_SIZE
:
477 errstr
= _("unknown instruction");
482 #if defined (HAVE_LIBIPT)
483 case BTRACE_FORMAT_PT
:
486 case BDE_PT_USER_QUIT
:
488 errstr
= _("trace decode cancelled");
491 case BDE_PT_DISABLED
:
493 errstr
= _("disabled");
496 case BDE_PT_OVERFLOW
:
498 errstr
= _("overflow");
503 errstr
= pt_errstr (pt_errcode (errcode
));
507 #endif /* defined (HAVE_LIBIPT) */
510 ui_out_text (uiout
, _("["));
513 ui_out_text (uiout
, _("decode error ("));
514 ui_out_field_int (uiout
, "errcode", errcode
);
515 ui_out_text (uiout
, _("): "));
517 ui_out_text (uiout
, errstr
);
518 ui_out_text (uiout
, _("]\n"));
/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    {
      /* END is exclusive, so it must be one past the added line or the
	 line would not be part of the range.  */
      range.end = line + 1;
    }

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
595 /* Find the line range associated with PC. */
597 static struct btrace_line_range
598 btrace_find_line_range (CORE_ADDR pc
)
600 struct btrace_line_range range
;
601 struct linetable_entry
*lines
;
602 struct linetable
*ltable
;
603 struct symtab
*symtab
;
606 symtab
= find_pc_line_symtab (pc
);
608 return btrace_mk_line_range (NULL
, 0, 0);
610 ltable
= SYMTAB_LINETABLE (symtab
);
612 return btrace_mk_line_range (symtab
, 0, 0);
614 nlines
= ltable
->nitems
;
615 lines
= ltable
->item
;
617 return btrace_mk_line_range (symtab
, 0, 0);
619 range
= btrace_mk_line_range (symtab
, 0, 0);
620 for (i
= 0; i
< nlines
- 1; i
++)
622 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0))
623 range
= btrace_line_range_add (range
, lines
[i
].line
);
629 /* Print source lines in LINES to UIOUT.
631 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
632 instructions corresponding to that source line. When printing a new source
633 line, we do the cleanups for the open chain and open a new cleanup chain for
634 the new source line. If the source line range in LINES is not empty, this
635 function will leave the cleanup chain for the last printed source line open
636 so instructions can be added to it. */
639 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
640 struct cleanup
**ui_item_chain
, int flags
)
642 enum print_source_lines_flags psl_flags
;
646 if (flags
& DISASSEMBLY_FILENAME
)
647 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
649 for (line
= lines
.begin
; line
< lines
.end
; ++line
)
651 if (*ui_item_chain
!= NULL
)
652 do_cleanups (*ui_item_chain
);
655 = make_cleanup_ui_out_tuple_begin_end (uiout
, "src_and_asm_line");
657 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
659 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
663 /* Disassemble a section of the recorded instruction trace. */
666 btrace_insn_history (struct ui_out
*uiout
,
667 const struct btrace_thread_info
*btinfo
,
668 const struct btrace_insn_iterator
*begin
,
669 const struct btrace_insn_iterator
*end
, int flags
)
672 struct cleanup
*cleanups
, *ui_item_chain
;
673 struct disassemble_info di
;
674 struct gdbarch
*gdbarch
;
675 struct btrace_insn_iterator it
;
676 struct btrace_line_range last_lines
;
678 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
679 btrace_insn_number (end
));
681 flags
|= DISASSEMBLY_SPECULATIVE
;
683 gdbarch
= target_gdbarch ();
684 stb
= mem_fileopen ();
685 cleanups
= make_cleanup_ui_file_delete (stb
);
686 di
= gdb_disassemble_info (gdbarch
, stb
);
687 last_lines
= btrace_mk_line_range (NULL
, 0, 0);
689 make_cleanup_ui_out_list_begin_end (uiout
, "asm_insns");
691 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
692 instructions corresponding to that line. */
693 ui_item_chain
= NULL
;
695 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
697 const struct btrace_insn
*insn
;
699 insn
= btrace_insn_get (&it
);
701 /* A NULL instruction indicates a gap in the trace. */
704 const struct btrace_config
*conf
;
706 conf
= btrace_conf (btinfo
);
708 /* We have trace so we must have a configuration. */
709 gdb_assert (conf
!= NULL
);
711 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
716 struct disasm_insn dinsn
;
718 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
720 struct btrace_line_range lines
;
722 lines
= btrace_find_line_range (insn
->pc
);
723 if (!btrace_line_range_is_empty (lines
)
724 && !btrace_line_range_contains_range (last_lines
, lines
))
726 btrace_print_lines (lines
, uiout
, &ui_item_chain
, flags
);
729 else if (ui_item_chain
== NULL
)
732 = make_cleanup_ui_out_tuple_begin_end (uiout
,
734 /* No source information. */
735 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
738 gdb_assert (ui_item_chain
!= NULL
);
741 memset (&dinsn
, 0, sizeof (dinsn
));
742 dinsn
.number
= btrace_insn_number (&it
);
743 dinsn
.addr
= insn
->pc
;
745 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
746 dinsn
.is_speculative
= 1;
748 gdb_pretty_print_insn (gdbarch
, uiout
, &di
, &dinsn
, flags
, stb
);
752 do_cleanups (cleanups
);
755 /* The to_insn_history method of target record-btrace. */
758 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
760 struct btrace_thread_info
*btinfo
;
761 struct btrace_insn_history
*history
;
762 struct btrace_insn_iterator begin
, end
;
763 struct cleanup
*uiout_cleanup
;
764 struct ui_out
*uiout
;
765 unsigned int context
, covered
;
767 uiout
= current_uiout
;
768 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
770 context
= abs (size
);
772 error (_("Bad record instruction-history-size."));
774 btinfo
= require_btrace ();
775 history
= btinfo
->insn_history
;
778 struct btrace_insn_iterator
*replay
;
780 DEBUG ("insn-history (0x%x): %d", flags
, size
);
782 /* If we're replaying, we start at the replay position. Otherwise, we
783 start at the tail of the trace. */
784 replay
= btinfo
->replay
;
788 btrace_insn_end (&begin
, btinfo
);
790 /* We start from here and expand in the requested direction. Then we
791 expand in the other direction, as well, to fill up any remaining
796 /* We want the current position covered, as well. */
797 covered
= btrace_insn_next (&end
, 1);
798 covered
+= btrace_insn_prev (&begin
, context
- covered
);
799 covered
+= btrace_insn_next (&end
, context
- covered
);
803 covered
= btrace_insn_next (&end
, context
);
804 covered
+= btrace_insn_prev (&begin
, context
- covered
);
809 begin
= history
->begin
;
812 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
813 btrace_insn_number (&begin
), btrace_insn_number (&end
));
818 covered
= btrace_insn_prev (&begin
, context
);
823 covered
= btrace_insn_next (&end
, context
);
828 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
832 printf_unfiltered (_("At the start of the branch trace record.\n"));
834 printf_unfiltered (_("At the end of the branch trace record.\n"));
837 btrace_set_insn_history (btinfo
, &begin
, &end
);
838 do_cleanups (uiout_cleanup
);
841 /* The to_insn_history_range method of target record-btrace. */
844 record_btrace_insn_history_range (struct target_ops
*self
,
845 ULONGEST from
, ULONGEST to
, int flags
)
847 struct btrace_thread_info
*btinfo
;
848 struct btrace_insn_history
*history
;
849 struct btrace_insn_iterator begin
, end
;
850 struct cleanup
*uiout_cleanup
;
851 struct ui_out
*uiout
;
852 unsigned int low
, high
;
855 uiout
= current_uiout
;
856 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
861 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
863 /* Check for wrap-arounds. */
864 if (low
!= from
|| high
!= to
)
865 error (_("Bad range."));
868 error (_("Bad range."));
870 btinfo
= require_btrace ();
872 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
874 error (_("Range out of bounds."));
876 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
879 /* Silently truncate the range. */
880 btrace_insn_end (&end
, btinfo
);
884 /* We want both begin and end to be inclusive. */
885 btrace_insn_next (&end
, 1);
888 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
889 btrace_set_insn_history (btinfo
, &begin
, &end
);
891 do_cleanups (uiout_cleanup
);
894 /* The to_insn_history_from method of target record-btrace. */
897 record_btrace_insn_history_from (struct target_ops
*self
,
898 ULONGEST from
, int size
, int flags
)
900 ULONGEST begin
, end
, context
;
902 context
= abs (size
);
904 error (_("Bad record instruction-history-size."));
913 begin
= from
- context
+ 1;
918 end
= from
+ context
- 1;
920 /* Check for wrap-around. */
925 record_btrace_insn_history_range (self
, begin
, end
, flags
);
928 /* Print the instruction number range for a function call history line. */
931 btrace_call_history_insn_range (struct ui_out
*uiout
,
932 const struct btrace_function
*bfun
)
934 unsigned int begin
, end
, size
;
936 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
937 gdb_assert (size
> 0);
939 begin
= bfun
->insn_offset
;
940 end
= begin
+ size
- 1;
942 ui_out_field_uint (uiout
, "insn begin", begin
);
943 ui_out_text (uiout
, ",");
944 ui_out_field_uint (uiout
, "insn end", end
);
947 /* Compute the lowest and highest source line for the instructions in BFUN
948 and return them in PBEGIN and PEND.
949 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
950 result from inlining or macro expansion. */
953 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
954 int *pbegin
, int *pend
)
956 struct btrace_insn
*insn
;
957 struct symtab
*symtab
;
969 symtab
= symbol_symtab (sym
);
971 for (idx
= 0; VEC_iterate (btrace_insn_s
, bfun
->insn
, idx
, insn
); ++idx
)
973 struct symtab_and_line sal
;
975 sal
= find_pc_line (insn
->pc
, 0);
976 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
979 begin
= min (begin
, sal
.line
);
980 end
= max (end
, sal
.line
);
988 /* Print the source line information for a function call history line. */
991 btrace_call_history_src_line (struct ui_out
*uiout
,
992 const struct btrace_function
*bfun
)
1001 ui_out_field_string (uiout
, "file",
1002 symtab_to_filename_for_display (symbol_symtab (sym
)));
1004 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1008 ui_out_text (uiout
, ":");
1009 ui_out_field_int (uiout
, "min line", begin
);
1014 ui_out_text (uiout
, ",");
1015 ui_out_field_int (uiout
, "max line", end
);
1018 /* Get the name of a branch trace function. */
1021 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1023 struct minimal_symbol
*msym
;
1033 return SYMBOL_PRINT_NAME (sym
);
1034 else if (msym
!= NULL
)
1035 return MSYMBOL_PRINT_NAME (msym
);
1040 /* Disassemble a section of the recorded function trace. */
1043 btrace_call_history (struct ui_out
*uiout
,
1044 const struct btrace_thread_info
*btinfo
,
1045 const struct btrace_call_iterator
*begin
,
1046 const struct btrace_call_iterator
*end
,
1047 enum record_print_flag flags
)
1049 struct btrace_call_iterator it
;
1051 DEBUG ("ftrace (0x%x): [%u; %u)", flags
, btrace_call_number (begin
),
1052 btrace_call_number (end
));
1054 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1056 const struct btrace_function
*bfun
;
1057 struct minimal_symbol
*msym
;
1060 bfun
= btrace_call_get (&it
);
1064 /* Print the function index. */
1065 ui_out_field_uint (uiout
, "index", bfun
->number
);
1066 ui_out_text (uiout
, "\t");
1068 /* Indicate gaps in the trace. */
1069 if (bfun
->errcode
!= 0)
1071 const struct btrace_config
*conf
;
1073 conf
= btrace_conf (btinfo
);
1075 /* We have trace so we must have a configuration. */
1076 gdb_assert (conf
!= NULL
);
1078 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1083 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1085 int level
= bfun
->level
+ btinfo
->level
, i
;
1087 for (i
= 0; i
< level
; ++i
)
1088 ui_out_text (uiout
, " ");
1092 ui_out_field_string (uiout
, "function", SYMBOL_PRINT_NAME (sym
));
1093 else if (msym
!= NULL
)
1094 ui_out_field_string (uiout
, "function", MSYMBOL_PRINT_NAME (msym
));
1095 else if (!ui_out_is_mi_like_p (uiout
))
1096 ui_out_field_string (uiout
, "function", "??");
1098 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1100 ui_out_text (uiout
, _("\tinst "));
1101 btrace_call_history_insn_range (uiout
, bfun
);
1104 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1106 ui_out_text (uiout
, _("\tat "));
1107 btrace_call_history_src_line (uiout
, bfun
);
1110 ui_out_text (uiout
, "\n");
1114 /* The to_call_history method of target record-btrace. */
1117 record_btrace_call_history (struct target_ops
*self
, int size
, int flags
)
1119 struct btrace_thread_info
*btinfo
;
1120 struct btrace_call_history
*history
;
1121 struct btrace_call_iterator begin
, end
;
1122 struct cleanup
*uiout_cleanup
;
1123 struct ui_out
*uiout
;
1124 unsigned int context
, covered
;
1126 uiout
= current_uiout
;
1127 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1129 context
= abs (size
);
1131 error (_("Bad record function-call-history-size."));
1133 btinfo
= require_btrace ();
1134 history
= btinfo
->call_history
;
1135 if (history
== NULL
)
1137 struct btrace_insn_iterator
*replay
;
1139 DEBUG ("call-history (0x%x): %d", flags
, size
);
1141 /* If we're replaying, we start at the replay position. Otherwise, we
1142 start at the tail of the trace. */
1143 replay
= btinfo
->replay
;
1146 begin
.function
= replay
->function
;
1147 begin
.btinfo
= btinfo
;
1150 btrace_call_end (&begin
, btinfo
);
1152 /* We start from here and expand in the requested direction. Then we
1153 expand in the other direction, as well, to fill up any remaining
1158 /* We want the current position covered, as well. */
1159 covered
= btrace_call_next (&end
, 1);
1160 covered
+= btrace_call_prev (&begin
, context
- covered
);
1161 covered
+= btrace_call_next (&end
, context
- covered
);
1165 covered
= btrace_call_next (&end
, context
);
1166 covered
+= btrace_call_prev (&begin
, context
- covered
);
1171 begin
= history
->begin
;
1174 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
1175 btrace_call_number (&begin
), btrace_call_number (&end
));
1180 covered
= btrace_call_prev (&begin
, context
);
1185 covered
= btrace_call_next (&end
, context
);
1190 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1194 printf_unfiltered (_("At the start of the branch trace record.\n"));
1196 printf_unfiltered (_("At the end of the branch trace record.\n"));
1199 btrace_set_call_history (btinfo
, &begin
, &end
);
1200 do_cleanups (uiout_cleanup
);
1203 /* The to_call_history_range method of target record-btrace. */
1206 record_btrace_call_history_range (struct target_ops
*self
,
1207 ULONGEST from
, ULONGEST to
, int flags
)
1209 struct btrace_thread_info
*btinfo
;
1210 struct btrace_call_history
*history
;
1211 struct btrace_call_iterator begin
, end
;
1212 struct cleanup
*uiout_cleanup
;
1213 struct ui_out
*uiout
;
1214 unsigned int low
, high
;
1217 uiout
= current_uiout
;
1218 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1223 DEBUG ("call-history (0x%x): [%u; %u)", flags
, low
, high
);
1225 /* Check for wrap-arounds. */
1226 if (low
!= from
|| high
!= to
)
1227 error (_("Bad range."));
1230 error (_("Bad range."));
1232 btinfo
= require_btrace ();
1234 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1236 error (_("Range out of bounds."));
1238 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1241 /* Silently truncate the range. */
1242 btrace_call_end (&end
, btinfo
);
1246 /* We want both begin and end to be inclusive. */
1247 btrace_call_next (&end
, 1);
1250 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1251 btrace_set_call_history (btinfo
, &begin
, &end
);
1253 do_cleanups (uiout_cleanup
);
1256 /* The to_call_history_from method of target record-btrace. */
1259 record_btrace_call_history_from (struct target_ops
*self
,
1260 ULONGEST from
, int size
, int flags
)
1262 ULONGEST begin
, end
, context
;
1264 context
= abs (size
);
1266 error (_("Bad record function-call-history-size."));
1275 begin
= from
- context
+ 1;
1280 end
= from
+ context
- 1;
1282 /* Check for wrap-around. */
1287 record_btrace_call_history_range (self
, begin
, end
, flags
);
1290 /* The to_record_is_replaying method of target record-btrace. */
1293 record_btrace_is_replaying (struct target_ops
*self
, ptid_t ptid
)
1295 struct thread_info
*tp
;
1297 ALL_NON_EXITED_THREADS (tp
)
1298 if (ptid_match (tp
->ptid
, ptid
) && btrace_is_replaying (tp
))
1304 /* The to_record_will_replay method of target record-btrace. */
1307 record_btrace_will_replay (struct target_ops
*self
, ptid_t ptid
, int dir
)
1309 return dir
== EXEC_REVERSE
|| record_btrace_is_replaying (self
, ptid
);
1312 /* The to_xfer_partial method of target record-btrace. */
1314 static enum target_xfer_status
1315 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1316 const char *annex
, gdb_byte
*readbuf
,
1317 const gdb_byte
*writebuf
, ULONGEST offset
,
1318 ULONGEST len
, ULONGEST
*xfered_len
)
1320 struct target_ops
*t
;
1322 /* Filter out requests that don't make sense during replay. */
1323 if (replay_memory_access
== replay_memory_access_read_only
1324 && !record_btrace_generating_corefile
1325 && record_btrace_is_replaying (ops
, inferior_ptid
))
1329 case TARGET_OBJECT_MEMORY
:
1331 struct target_section
*section
;
1333 /* We do not allow writing memory in general. */
1334 if (writebuf
!= NULL
)
1337 return TARGET_XFER_UNAVAILABLE
;
1340 /* We allow reading readonly memory. */
1341 section
= target_section_by_addr (ops
, offset
);
1342 if (section
!= NULL
)
1344 /* Check if the section we found is readonly. */
1345 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1346 section
->the_bfd_section
)
1347 & SEC_READONLY
) != 0)
1349 /* Truncate the request to fit into this section. */
1350 len
= min (len
, section
->endaddr
- offset
);
1356 return TARGET_XFER_UNAVAILABLE
;
1361 /* Forward the request. */
1363 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1364 offset
, len
, xfered_len
);
1367 /* The to_insert_breakpoint method of target record-btrace. */
1370 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1371 struct gdbarch
*gdbarch
,
1372 struct bp_target_info
*bp_tgt
)
1377 /* Inserting breakpoints requires accessing memory. Allow it for the
1378 duration of this function. */
1379 old
= replay_memory_access
;
1380 replay_memory_access
= replay_memory_access_read_write
;
1385 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1387 CATCH (except
, RETURN_MASK_ALL
)
1389 replay_memory_access
= old
;
1390 throw_exception (except
);
1393 replay_memory_access
= old
;
1398 /* The to_remove_breakpoint method of target record-btrace. */
1401 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1402 struct gdbarch
*gdbarch
,
1403 struct bp_target_info
*bp_tgt
)
1408 /* Removing breakpoints requires accessing memory. Allow it for the
1409 duration of this function. */
1410 old
= replay_memory_access
;
1411 replay_memory_access
= replay_memory_access_read_write
;
1416 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1418 CATCH (except
, RETURN_MASK_ALL
)
1420 replay_memory_access
= old
;
1421 throw_exception (except
);
1424 replay_memory_access
= old
;
1429 /* The to_fetch_registers method of target record-btrace. */
1432 record_btrace_fetch_registers (struct target_ops
*ops
,
1433 struct regcache
*regcache
, int regno
)
1435 struct btrace_insn_iterator
*replay
;
1436 struct thread_info
*tp
;
1438 tp
= find_thread_ptid (inferior_ptid
);
1439 gdb_assert (tp
!= NULL
);
1441 replay
= tp
->btrace
.replay
;
1442 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1444 const struct btrace_insn
*insn
;
1445 struct gdbarch
*gdbarch
;
1448 gdbarch
= get_regcache_arch (regcache
);
1449 pcreg
= gdbarch_pc_regnum (gdbarch
);
1453 /* We can only provide the PC register. */
1454 if (regno
>= 0 && regno
!= pcreg
)
1457 insn
= btrace_insn_get (replay
);
1458 gdb_assert (insn
!= NULL
);
1460 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1464 struct target_ops
*t
= ops
->beneath
;
1466 t
->to_fetch_registers (t
, regcache
, regno
);
1470 /* The to_store_registers method of target record-btrace. */
1473 record_btrace_store_registers (struct target_ops
*ops
,
1474 struct regcache
*regcache
, int regno
)
1476 struct target_ops
*t
;
1478 if (!record_btrace_generating_corefile
1479 && record_btrace_is_replaying (ops
, inferior_ptid
))
1480 error (_("Cannot write registers while replaying."));
1482 gdb_assert (may_write_registers
!= 0);
1485 t
->to_store_registers (t
, regcache
, regno
);
1488 /* The to_prepare_to_store method of target record-btrace. */
1491 record_btrace_prepare_to_store (struct target_ops
*ops
,
1492 struct regcache
*regcache
)
1494 struct target_ops
*t
;
1496 if (!record_btrace_generating_corefile
1497 && record_btrace_is_replaying (ops
, inferior_ptid
))
1501 t
->to_prepare_to_store (t
, regcache
);
1504 /* The branch trace frame cache. */
1506 struct btrace_frame_cache
1509 struct thread_info
*tp
;
1511 /* The frame info. */
1512 struct frame_info
*frame
;
1514 /* The branch trace function segment. */
1515 const struct btrace_function
*bfun
;
1518 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1520 static htab_t bfcache
;
1522 /* hash_f for htab_create_alloc of bfcache. */
1525 bfcache_hash (const void *arg
)
1527 const struct btrace_frame_cache
*cache
1528 = (const struct btrace_frame_cache
*) arg
;
1530 return htab_hash_pointer (cache
->frame
);
1533 /* eq_f for htab_create_alloc of bfcache. */
1536 bfcache_eq (const void *arg1
, const void *arg2
)
1538 const struct btrace_frame_cache
*cache1
1539 = (const struct btrace_frame_cache
*) arg1
;
1540 const struct btrace_frame_cache
*cache2
1541 = (const struct btrace_frame_cache
*) arg2
;
1543 return cache1
->frame
== cache2
->frame
;
1546 /* Create a new btrace frame cache. */
1548 static struct btrace_frame_cache
*
1549 bfcache_new (struct frame_info
*frame
)
1551 struct btrace_frame_cache
*cache
;
1554 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1555 cache
->frame
= frame
;
1557 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1558 gdb_assert (*slot
== NULL
);
1564 /* Extract the branch trace function from a branch trace frame. */
1566 static const struct btrace_function
*
1567 btrace_get_frame_function (struct frame_info
*frame
)
1569 const struct btrace_frame_cache
*cache
;
1570 const struct btrace_function
*bfun
;
1571 struct btrace_frame_cache pattern
;
1574 pattern
.frame
= frame
;
1576 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1580 cache
= (const struct btrace_frame_cache
*) *slot
;
1584 /* Implement stop_reason method for record_btrace_frame_unwind. */
1586 static enum unwind_stop_reason
1587 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1590 const struct btrace_frame_cache
*cache
;
1591 const struct btrace_function
*bfun
;
1593 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1595 gdb_assert (bfun
!= NULL
);
1597 if (bfun
->up
== NULL
)
1598 return UNWIND_UNAVAILABLE
;
1600 return UNWIND_NO_REASON
;
1603 /* Implement this_id method for record_btrace_frame_unwind. */
1606 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1607 struct frame_id
*this_id
)
1609 const struct btrace_frame_cache
*cache
;
1610 const struct btrace_function
*bfun
;
1611 CORE_ADDR code
, special
;
1613 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1616 gdb_assert (bfun
!= NULL
);
1618 while (bfun
->segment
.prev
!= NULL
)
1619 bfun
= bfun
->segment
.prev
;
1621 code
= get_frame_func (this_frame
);
1622 special
= bfun
->number
;
1624 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1626 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1627 btrace_get_bfun_name (cache
->bfun
),
1628 core_addr_to_string_nz (this_id
->code_addr
),
1629 core_addr_to_string_nz (this_id
->special_addr
));
1632 /* Implement prev_register method for record_btrace_frame_unwind. */
1634 static struct value
*
1635 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1639 const struct btrace_frame_cache
*cache
;
1640 const struct btrace_function
*bfun
, *caller
;
1641 const struct btrace_insn
*insn
;
1642 struct gdbarch
*gdbarch
;
1646 gdbarch
= get_frame_arch (this_frame
);
1647 pcreg
= gdbarch_pc_regnum (gdbarch
);
1648 if (pcreg
< 0 || regnum
!= pcreg
)
1649 throw_error (NOT_AVAILABLE_ERROR
,
1650 _("Registers are not available in btrace record history"));
1652 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1654 gdb_assert (bfun
!= NULL
);
1658 throw_error (NOT_AVAILABLE_ERROR
,
1659 _("No caller in btrace record history"));
1661 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1663 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1668 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1671 pc
+= gdb_insn_length (gdbarch
, pc
);
1674 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1675 btrace_get_bfun_name (bfun
), bfun
->level
,
1676 core_addr_to_string_nz (pc
));
1678 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1681 /* Implement sniffer method for record_btrace_frame_unwind. */
1684 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1685 struct frame_info
*this_frame
,
1688 const struct btrace_function
*bfun
;
1689 struct btrace_frame_cache
*cache
;
1690 struct thread_info
*tp
;
1691 struct frame_info
*next
;
1693 /* THIS_FRAME does not contain a reference to its thread. */
1694 tp
= find_thread_ptid (inferior_ptid
);
1695 gdb_assert (tp
!= NULL
);
1698 next
= get_next_frame (this_frame
);
1701 const struct btrace_insn_iterator
*replay
;
1703 replay
= tp
->btrace
.replay
;
1705 bfun
= replay
->function
;
1709 const struct btrace_function
*callee
;
1711 callee
= btrace_get_frame_function (next
);
1712 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1719 DEBUG ("[frame] sniffed frame for %s on level %d",
1720 btrace_get_bfun_name (bfun
), bfun
->level
);
1722 /* This is our frame. Initialize the frame cache. */
1723 cache
= bfcache_new (this_frame
);
1727 *this_cache
= cache
;
1731 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1734 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1735 struct frame_info
*this_frame
,
1738 const struct btrace_function
*bfun
, *callee
;
1739 struct btrace_frame_cache
*cache
;
1740 struct frame_info
*next
;
1742 next
= get_next_frame (this_frame
);
1746 callee
= btrace_get_frame_function (next
);
1750 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1757 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1758 btrace_get_bfun_name (bfun
), bfun
->level
);
1760 /* This is our frame. Initialize the frame cache. */
1761 cache
= bfcache_new (this_frame
);
1762 cache
->tp
= find_thread_ptid (inferior_ptid
);
1765 *this_cache
= cache
;
1770 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1772 struct btrace_frame_cache
*cache
;
1775 cache
= (struct btrace_frame_cache
*) this_cache
;
1777 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1778 gdb_assert (slot
!= NULL
);
1780 htab_remove_elt (bfcache
, cache
);
1783 /* btrace recording does not store previous memory content, neither the stack
1784 frames content. Any unwinding would return errorneous results as the stack
1785 contents no longer matches the changed PC value restored from history.
1786 Therefore this unwinder reports any possibly unwound registers as
1789 const struct frame_unwind record_btrace_frame_unwind
=
1792 record_btrace_frame_unwind_stop_reason
,
1793 record_btrace_frame_this_id
,
1794 record_btrace_frame_prev_register
,
1796 record_btrace_frame_sniffer
,
1797 record_btrace_frame_dealloc_cache
1800 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1803 record_btrace_frame_unwind_stop_reason
,
1804 record_btrace_frame_this_id
,
1805 record_btrace_frame_prev_register
,
1807 record_btrace_tailcall_frame_sniffer
,
1808 record_btrace_frame_dealloc_cache
1811 /* Implement the to_get_unwinder method. */
1813 static const struct frame_unwind
*
1814 record_btrace_to_get_unwinder (struct target_ops
*self
)
1816 return &record_btrace_frame_unwind
;
1819 /* Implement the to_get_tailcall_unwinder method. */
1821 static const struct frame_unwind
*
1822 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1824 return &record_btrace_tailcall_frame_unwind
;
1827 /* Return a human-readable string for FLAG. */
1830 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1838 return "reverse-step";
1844 return "reverse-cont";
1853 /* Indicate that TP should be resumed according to FLAG. */
1856 record_btrace_resume_thread (struct thread_info
*tp
,
1857 enum btrace_thread_flag flag
)
1859 struct btrace_thread_info
*btinfo
;
1861 DEBUG ("resuming thread %d (%s): %x (%s)", tp
->num
,
1862 target_pid_to_str (tp
->ptid
), flag
, btrace_thread_flag_to_str (flag
));
1864 btinfo
= &tp
->btrace
;
1866 /* Fetch the latest branch trace. */
1869 /* A resume request overwrites a preceding resume or stop request. */
1870 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1871 btinfo
->flags
|= flag
;
1874 /* Get the current frame for TP. */
1876 static struct frame_info
*
1877 get_thread_current_frame (struct thread_info
*tp
)
1879 struct frame_info
*frame
;
1880 ptid_t old_inferior_ptid
;
1883 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1884 old_inferior_ptid
= inferior_ptid
;
1885 inferior_ptid
= tp
->ptid
;
1887 /* Clear the executing flag to allow changes to the current frame.
1888 We are not actually running, yet. We just started a reverse execution
1889 command or a record goto command.
1890 For the latter, EXECUTING is false and this has no effect.
1891 For the former, EXECUTING is true and we're in to_wait, about to
1892 move the thread. Since we need to recompute the stack, we temporarily
1893 set EXECUTING to flase. */
1894 executing
= is_executing (inferior_ptid
);
1895 set_executing (inferior_ptid
, 0);
1900 frame
= get_current_frame ();
1902 CATCH (except
, RETURN_MASK_ALL
)
1904 /* Restore the previous execution state. */
1905 set_executing (inferior_ptid
, executing
);
1907 /* Restore the previous inferior_ptid. */
1908 inferior_ptid
= old_inferior_ptid
;
1910 throw_exception (except
);
1914 /* Restore the previous execution state. */
1915 set_executing (inferior_ptid
, executing
);
1917 /* Restore the previous inferior_ptid. */
1918 inferior_ptid
= old_inferior_ptid
;
1923 /* Start replaying a thread. */
1925 static struct btrace_insn_iterator
*
1926 record_btrace_start_replaying (struct thread_info
*tp
)
1928 struct btrace_insn_iterator
*replay
;
1929 struct btrace_thread_info
*btinfo
;
1931 btinfo
= &tp
->btrace
;
1934 /* We can't start replaying without trace. */
1935 if (btinfo
->begin
== NULL
)
1938 /* GDB stores the current frame_id when stepping in order to detects steps
1940 Since frames are computed differently when we're replaying, we need to
1941 recompute those stored frames and fix them up so we can still detect
1942 subroutines after we started replaying. */
1945 struct frame_info
*frame
;
1946 struct frame_id frame_id
;
1947 int upd_step_frame_id
, upd_step_stack_frame_id
;
1949 /* The current frame without replaying - computed via normal unwind. */
1950 frame
= get_thread_current_frame (tp
);
1951 frame_id
= get_frame_id (frame
);
1953 /* Check if we need to update any stepping-related frame id's. */
1954 upd_step_frame_id
= frame_id_eq (frame_id
,
1955 tp
->control
.step_frame_id
);
1956 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1957 tp
->control
.step_stack_frame_id
);
1959 /* We start replaying at the end of the branch trace. This corresponds
1960 to the current instruction. */
1961 replay
= XNEW (struct btrace_insn_iterator
);
1962 btrace_insn_end (replay
, btinfo
);
1964 /* Skip gaps at the end of the trace. */
1965 while (btrace_insn_get (replay
) == NULL
)
1969 steps
= btrace_insn_prev (replay
, 1);
1971 error (_("No trace."));
1974 /* We're not replaying, yet. */
1975 gdb_assert (btinfo
->replay
== NULL
);
1976 btinfo
->replay
= replay
;
1978 /* Make sure we're not using any stale registers. */
1979 registers_changed_ptid (tp
->ptid
);
1981 /* The current frame with replaying - computed via btrace unwind. */
1982 frame
= get_thread_current_frame (tp
);
1983 frame_id
= get_frame_id (frame
);
1985 /* Replace stepping related frames where necessary. */
1986 if (upd_step_frame_id
)
1987 tp
->control
.step_frame_id
= frame_id
;
1988 if (upd_step_stack_frame_id
)
1989 tp
->control
.step_stack_frame_id
= frame_id
;
1991 CATCH (except
, RETURN_MASK_ALL
)
1993 xfree (btinfo
->replay
);
1994 btinfo
->replay
= NULL
;
1996 registers_changed_ptid (tp
->ptid
);
1998 throw_exception (except
);
2005 /* Stop replaying a thread. */
2008 record_btrace_stop_replaying (struct thread_info
*tp
)
2010 struct btrace_thread_info
*btinfo
;
2012 btinfo
= &tp
->btrace
;
2014 xfree (btinfo
->replay
);
2015 btinfo
->replay
= NULL
;
2017 /* Make sure we're not leaving any stale registers. */
2018 registers_changed_ptid (tp
->ptid
);
2021 /* Stop replaying TP if it is at the end of its execution history. */
2024 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2026 struct btrace_insn_iterator
*replay
, end
;
2027 struct btrace_thread_info
*btinfo
;
2029 btinfo
= &tp
->btrace
;
2030 replay
= btinfo
->replay
;
2035 btrace_insn_end (&end
, btinfo
);
2037 if (btrace_insn_cmp (replay
, &end
) == 0)
2038 record_btrace_stop_replaying (tp
);
2041 /* The to_resume method of target record-btrace. */
2044 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
2045 enum gdb_signal signal
)
2047 struct thread_info
*tp
;
2048 enum btrace_thread_flag flag
, cflag
;
2050 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
),
2051 execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2052 step
? "step" : "cont");
2054 /* Store the execution direction of the last resume.
2056 If there is more than one to_resume call, we have to rely on infrun
2057 to not change the execution direction in-between. */
2058 record_btrace_resume_exec_dir
= execution_direction
;
2060 /* As long as we're not replaying, just forward the request.
2062 For non-stop targets this means that no thread is replaying. In order to
2063 make progress, we may need to explicitly move replaying threads to the end
2064 of their execution history. */
2065 if ((execution_direction
!= EXEC_REVERSE
)
2066 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2069 ops
->to_resume (ops
, ptid
, step
, signal
);
2073 /* Compute the btrace thread flag for the requested move. */
2074 if (execution_direction
== EXEC_REVERSE
)
2076 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2081 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2085 /* We just indicate the resume intent here. The actual stepping happens in
2086 record_btrace_wait below.
2088 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2089 if (!target_is_non_stop_p ())
2091 gdb_assert (ptid_match (inferior_ptid
, ptid
));
2093 ALL_NON_EXITED_THREADS (tp
)
2094 if (ptid_match (tp
->ptid
, ptid
))
2096 if (ptid_match (tp
->ptid
, inferior_ptid
))
2097 record_btrace_resume_thread (tp
, flag
);
2099 record_btrace_resume_thread (tp
, cflag
);
2104 ALL_NON_EXITED_THREADS (tp
)
2105 if (ptid_match (tp
->ptid
, ptid
))
2106 record_btrace_resume_thread (tp
, flag
);
2109 /* Async support. */
2110 if (target_can_async_p ())
2113 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2117 /* Cancel resuming TP. */
2120 record_btrace_cancel_resume (struct thread_info
*tp
)
2122 enum btrace_thread_flag flags
;
2124 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2128 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp
->num
,
2129 target_pid_to_str (tp
->ptid
), flags
,
2130 btrace_thread_flag_to_str (flags
));
2132 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2133 record_btrace_stop_replaying_at_end (tp
);
2136 /* Return a target_waitstatus indicating that we ran out of history. */
2138 static struct target_waitstatus
2139 btrace_step_no_history (void)
2141 struct target_waitstatus status
;
2143 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2148 /* Return a target_waitstatus indicating that a step finished. */
2150 static struct target_waitstatus
2151 btrace_step_stopped (void)
2153 struct target_waitstatus status
;
2155 status
.kind
= TARGET_WAITKIND_STOPPED
;
2156 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2161 /* Return a target_waitstatus indicating that a thread was stopped as
2164 static struct target_waitstatus
2165 btrace_step_stopped_on_request (void)
2167 struct target_waitstatus status
;
2169 status
.kind
= TARGET_WAITKIND_STOPPED
;
2170 status
.value
.sig
= GDB_SIGNAL_0
;
2175 /* Return a target_waitstatus indicating a spurious stop. */
2177 static struct target_waitstatus
2178 btrace_step_spurious (void)
2180 struct target_waitstatus status
;
2182 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2187 /* Return a target_waitstatus indicating that the thread was not resumed. */
2189 static struct target_waitstatus
2190 btrace_step_no_resumed (void)
2192 struct target_waitstatus status
;
2194 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2199 /* Return a target_waitstatus indicating that we should wait again. */
2201 static struct target_waitstatus
2202 btrace_step_again (void)
2204 struct target_waitstatus status
;
2206 status
.kind
= TARGET_WAITKIND_IGNORE
;
2211 /* Clear the record histories. */
2214 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2216 xfree (btinfo
->insn_history
);
2217 xfree (btinfo
->call_history
);
2219 btinfo
->insn_history
= NULL
;
2220 btinfo
->call_history
= NULL
;
2223 /* Check whether TP's current replay position is at a breakpoint. */
2226 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2228 struct btrace_insn_iterator
*replay
;
2229 struct btrace_thread_info
*btinfo
;
2230 const struct btrace_insn
*insn
;
2231 struct inferior
*inf
;
2233 btinfo
= &tp
->btrace
;
2234 replay
= btinfo
->replay
;
2239 insn
= btrace_insn_get (replay
);
2243 inf
= find_inferior_ptid (tp
->ptid
);
2247 return record_check_stopped_by_breakpoint (inf
->aspace
, insn
->pc
,
2248 &btinfo
->stop_reason
);
2251 /* Step one instruction in forward direction. */
2253 static struct target_waitstatus
2254 record_btrace_single_step_forward (struct thread_info
*tp
)
2256 struct btrace_insn_iterator
*replay
, end
;
2257 struct btrace_thread_info
*btinfo
;
2259 btinfo
= &tp
->btrace
;
2260 replay
= btinfo
->replay
;
2262 /* We're done if we're not replaying. */
2264 return btrace_step_no_history ();
2266 /* Check if we're stepping a breakpoint. */
2267 if (record_btrace_replay_at_breakpoint (tp
))
2268 return btrace_step_stopped ();
2270 /* Skip gaps during replay. */
2275 /* We will bail out here if we continue stepping after reaching the end
2276 of the execution history. */
2277 steps
= btrace_insn_next (replay
, 1);
2279 return btrace_step_no_history ();
2281 while (btrace_insn_get (replay
) == NULL
);
2283 /* Determine the end of the instruction trace. */
2284 btrace_insn_end (&end
, btinfo
);
2286 /* The execution trace contains (and ends with) the current instruction.
2287 This instruction has not been executed, yet, so the trace really ends
2288 one instruction earlier. */
2289 if (btrace_insn_cmp (replay
, &end
) == 0)
2290 return btrace_step_no_history ();
2292 return btrace_step_spurious ();
2295 /* Step one instruction in backward direction. */
2297 static struct target_waitstatus
2298 record_btrace_single_step_backward (struct thread_info
*tp
)
2300 struct btrace_insn_iterator
*replay
;
2301 struct btrace_thread_info
*btinfo
;
2303 btinfo
= &tp
->btrace
;
2304 replay
= btinfo
->replay
;
2306 /* Start replaying if we're not already doing so. */
2308 replay
= record_btrace_start_replaying (tp
);
2310 /* If we can't step any further, we reached the end of the history.
2311 Skip gaps during replay. */
2316 steps
= btrace_insn_prev (replay
, 1);
2318 return btrace_step_no_history ();
2320 while (btrace_insn_get (replay
) == NULL
);
2322 /* Check if we're stepping a breakpoint.
2324 For reverse-stepping, this check is after the step. There is logic in
2325 infrun.c that handles reverse-stepping separately. See, for example,
2326 proceed and adjust_pc_after_break.
2328 This code assumes that for reverse-stepping, PC points to the last
2329 de-executed instruction, whereas for forward-stepping PC points to the
2330 next to-be-executed instruction. */
2331 if (record_btrace_replay_at_breakpoint (tp
))
2332 return btrace_step_stopped ();
2334 return btrace_step_spurious ();
2337 /* Step a single thread. */
2339 static struct target_waitstatus
2340 record_btrace_step_thread (struct thread_info
*tp
)
2342 struct btrace_thread_info
*btinfo
;
2343 struct target_waitstatus status
;
2344 enum btrace_thread_flag flags
;
2346 btinfo
= &tp
->btrace
;
2348 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2349 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2351 DEBUG ("stepping thread %d (%s): %x (%s)", tp
->num
,
2352 target_pid_to_str (tp
->ptid
), flags
,
2353 btrace_thread_flag_to_str (flags
));
2355 /* We can't step without an execution history. */
2356 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2357 return btrace_step_no_history ();
2362 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2365 return btrace_step_stopped_on_request ();
2368 status
= record_btrace_single_step_forward (tp
);
2369 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2372 return btrace_step_stopped ();
2375 status
= record_btrace_single_step_backward (tp
);
2376 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2379 return btrace_step_stopped ();
2382 status
= record_btrace_single_step_forward (tp
);
2383 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2386 btinfo
->flags
|= flags
;
2387 return btrace_step_again ();
2390 status
= record_btrace_single_step_backward (tp
);
2391 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2394 btinfo
->flags
|= flags
;
2395 return btrace_step_again ();
2398 /* We keep threads moving at the end of their execution history. The to_wait
2399 method will stop the thread for whom the event is reported. */
2400 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2401 btinfo
->flags
|= flags
;
/* A vector of threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2411 /* Announce further events if necessary. */
2414 record_btrace_maybe_mark_async_event (const VEC (tp_t
) *moving
,
2415 const VEC (tp_t
) *no_history
)
2417 int more_moving
, more_no_history
;
2419 more_moving
= !VEC_empty (tp_t
, moving
);
2420 more_no_history
= !VEC_empty (tp_t
, no_history
);
2422 if (!more_moving
&& !more_no_history
)
2426 DEBUG ("movers pending");
2428 if (more_no_history
)
2429 DEBUG ("no-history pending");
2431 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2434 /* The to_wait method of target record-btrace. */
2437 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2438 struct target_waitstatus
*status
, int options
)
2440 VEC (tp_t
) *moving
, *no_history
;
2441 struct thread_info
*tp
, *eventing
;
2442 struct cleanup
*cleanups
= make_cleanup (null_cleanup
, NULL
);
2444 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2446 /* As long as we're not replaying, just forward the request. */
2447 if ((execution_direction
!= EXEC_REVERSE
)
2448 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2451 return ops
->to_wait (ops
, ptid
, status
, options
);
2457 make_cleanup (VEC_cleanup (tp_t
), &moving
);
2458 make_cleanup (VEC_cleanup (tp_t
), &no_history
);
2460 /* Keep a work list of moving threads. */
2461 ALL_NON_EXITED_THREADS (tp
)
2462 if (ptid_match (tp
->ptid
, ptid
)
2463 && ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0))
2464 VEC_safe_push (tp_t
, moving
, tp
);
2466 if (VEC_empty (tp_t
, moving
))
2468 *status
= btrace_step_no_resumed ();
2470 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
),
2471 target_waitstatus_to_string (status
));
2473 do_cleanups (cleanups
);
2477 /* Step moving threads one by one, one step each, until either one thread
2478 reports an event or we run out of threads to step.
2480 When stepping more than one thread, chances are that some threads reach
2481 the end of their execution history earlier than others. If we reported
2482 this immediately, all-stop on top of non-stop would stop all threads and
2483 resume the same threads next time. And we would report the same thread
2484 having reached the end of its execution history again.
2486 In the worst case, this would starve the other threads. But even if other
2487 threads would be allowed to make progress, this would result in far too
2488 many intermediate stops.
2490 We therefore delay the reporting of "no execution history" until we have
2491 nothing else to report. By this time, all threads should have moved to
2492 either the beginning or the end of their execution history. There will
2493 be a single user-visible stop. */
2495 while ((eventing
== NULL
) && !VEC_empty (tp_t
, moving
))
2500 while ((eventing
== NULL
) && VEC_iterate (tp_t
, moving
, ix
, tp
))
2502 *status
= record_btrace_step_thread (tp
);
2504 switch (status
->kind
)
2506 case TARGET_WAITKIND_IGNORE
:
2510 case TARGET_WAITKIND_NO_HISTORY
:
2511 VEC_safe_push (tp_t
, no_history
,
2512 VEC_ordered_remove (tp_t
, moving
, ix
));
2516 eventing
= VEC_unordered_remove (tp_t
, moving
, ix
);
2522 if (eventing
== NULL
)
2524 /* We started with at least one moving thread. This thread must have
2525 either stopped or reached the end of its execution history.
2527 In the former case, EVENTING must not be NULL.
2528 In the latter case, NO_HISTORY must not be empty. */
2529 gdb_assert (!VEC_empty (tp_t
, no_history
));
2531 /* We kept threads moving at the end of their execution history. Stop
2532 EVENTING now that we are going to report its stop. */
2533 eventing
= VEC_unordered_remove (tp_t
, no_history
, 0);
2534 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2536 *status
= btrace_step_no_history ();
2539 gdb_assert (eventing
!= NULL
);
2541 /* We kept threads replaying at the end of their execution history. Stop
2542 replaying EVENTING now that we are going to report its stop. */
2543 record_btrace_stop_replaying_at_end (eventing
);
2545 /* Stop all other threads. */
2546 if (!target_is_non_stop_p ())
2547 ALL_NON_EXITED_THREADS (tp
)
2548 record_btrace_cancel_resume (tp
);
2550 /* In async mode, we need to announce further events. */
2551 if (target_is_async_p ())
2552 record_btrace_maybe_mark_async_event (moving
, no_history
);
2554 /* Start record histories anew from the current position. */
2555 record_btrace_clear_histories (&eventing
->btrace
);
2557 /* We moved the replay position but did not update registers. */
2558 registers_changed_ptid (eventing
->ptid
);
2560 DEBUG ("wait ended by thread %d (%s): %s", eventing
->num
,
2561 target_pid_to_str (eventing
->ptid
),
2562 target_waitstatus_to_string (status
));
2564 do_cleanups (cleanups
);
2565 return eventing
->ptid
;
2568 /* The to_stop method of target record-btrace. */
2571 record_btrace_stop (struct target_ops
*ops
, ptid_t ptid
)
2573 DEBUG ("stop %s", target_pid_to_str (ptid
));
2575 /* As long as we're not replaying, just forward the request. */
2576 if ((execution_direction
!= EXEC_REVERSE
)
2577 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2580 ops
->to_stop (ops
, ptid
);
2584 struct thread_info
*tp
;
2586 ALL_NON_EXITED_THREADS (tp
)
2587 if (ptid_match (tp
->ptid
, ptid
))
2589 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2590 tp
->btrace
.flags
|= BTHR_STOP
;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2603 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2606 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2608 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2610 struct thread_info
*tp
= inferior_thread ();
2612 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2615 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2618 /* The to_supports_stopped_by_sw_breakpoint method of target
2622 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2624 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2627 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2630 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2633 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2635 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2637 struct thread_info
*tp
= inferior_thread ();
2639 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2642 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2645 /* The to_supports_stopped_by_hw_breakpoint method of target
2649 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2651 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2654 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2657 /* The to_update_thread_list method of target record-btrace. */
2660 record_btrace_update_thread_list (struct target_ops
*ops
)
2662 /* We don't add or remove threads during replay. */
2663 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2666 /* Forward the request. */
2668 ops
->to_update_thread_list (ops
);
2671 /* The to_thread_alive method of target record-btrace. */
2674 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2676 /* We don't add or remove threads during replay. */
2677 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2678 return find_thread_ptid (ptid
) != NULL
;
2680 /* Forward the request. */
2682 return ops
->to_thread_alive (ops
, ptid
);
2685 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2689 record_btrace_set_replay (struct thread_info
*tp
,
2690 const struct btrace_insn_iterator
*it
)
2692 struct btrace_thread_info
*btinfo
;
2694 btinfo
= &tp
->btrace
;
2696 if (it
== NULL
|| it
->function
== NULL
)
2697 record_btrace_stop_replaying (tp
);
2700 if (btinfo
->replay
== NULL
)
2701 record_btrace_start_replaying (tp
);
2702 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2705 *btinfo
->replay
= *it
;
2706 registers_changed_ptid (tp
->ptid
);
2709 /* Start anew from the new replay position. */
2710 record_btrace_clear_histories (btinfo
);
2712 stop_pc
= regcache_read_pc (get_current_regcache ());
2713 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2716 /* The to_goto_record_begin method of target record-btrace. */
2719 record_btrace_goto_begin (struct target_ops
*self
)
2721 struct thread_info
*tp
;
2722 struct btrace_insn_iterator begin
;
2724 tp
= require_btrace_thread ();
2726 btrace_insn_begin (&begin
, &tp
->btrace
);
2727 record_btrace_set_replay (tp
, &begin
);
2730 /* The to_goto_record_end method of target record-btrace. */
2733 record_btrace_goto_end (struct target_ops
*ops
)
2735 struct thread_info
*tp
;
2737 tp
= require_btrace_thread ();
2739 record_btrace_set_replay (tp
, NULL
);
2742 /* The to_goto_record method of target record-btrace. */
2745 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2747 struct thread_info
*tp
;
2748 struct btrace_insn_iterator it
;
2749 unsigned int number
;
2754 /* Check for wrap-arounds. */
2756 error (_("Instruction number out of range."));
2758 tp
= require_btrace_thread ();
2760 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2762 error (_("No such instruction."));
2764 record_btrace_set_replay (tp
, &it
);
2767 /* The to_record_stop_replaying method of target record-btrace. */
2770 record_btrace_stop_replaying_all (struct target_ops
*self
)
2772 struct thread_info
*tp
;
2774 ALL_NON_EXITED_THREADS (tp
)
2775 record_btrace_stop_replaying (tp
);
2778 /* The to_execution_direction target method. */
2780 static enum exec_direction_kind
2781 record_btrace_execution_direction (struct target_ops
*self
)
2783 return record_btrace_resume_exec_dir
;
2786 /* The to_prepare_to_generate_core target method. */
2789 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2791 record_btrace_generating_corefile
= 1;
2794 /* The to_done_generating_core target method. */
2797 record_btrace_done_generating_core (struct target_ops
*self
)
2799 record_btrace_generating_corefile
= 0;
2802 /* Initialize the record-btrace target ops. */
2805 init_record_btrace_ops (void)
2807 struct target_ops
*ops
;
2809 ops
= &record_btrace_ops
;
2810 ops
->to_shortname
= "record-btrace";
2811 ops
->to_longname
= "Branch tracing target";
2812 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2813 ops
->to_open
= record_btrace_open
;
2814 ops
->to_close
= record_btrace_close
;
2815 ops
->to_async
= record_btrace_async
;
2816 ops
->to_detach
= record_detach
;
2817 ops
->to_disconnect
= record_disconnect
;
2818 ops
->to_mourn_inferior
= record_mourn_inferior
;
2819 ops
->to_kill
= record_kill
;
2820 ops
->to_stop_recording
= record_btrace_stop_recording
;
2821 ops
->to_info_record
= record_btrace_info
;
2822 ops
->to_insn_history
= record_btrace_insn_history
;
2823 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2824 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2825 ops
->to_call_history
= record_btrace_call_history
;
2826 ops
->to_call_history_from
= record_btrace_call_history_from
;
2827 ops
->to_call_history_range
= record_btrace_call_history_range
;
2828 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2829 ops
->to_record_will_replay
= record_btrace_will_replay
;
2830 ops
->to_record_stop_replaying
= record_btrace_stop_replaying_all
;
2831 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2832 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2833 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2834 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2835 ops
->to_store_registers
= record_btrace_store_registers
;
2836 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2837 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2838 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2839 ops
->to_resume
= record_btrace_resume
;
2840 ops
->to_wait
= record_btrace_wait
;
2841 ops
->to_stop
= record_btrace_stop
;
2842 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2843 ops
->to_thread_alive
= record_btrace_thread_alive
;
2844 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2845 ops
->to_goto_record_end
= record_btrace_goto_end
;
2846 ops
->to_goto_record
= record_btrace_goto
;
2847 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2848 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2849 ops
->to_supports_stopped_by_sw_breakpoint
2850 = record_btrace_supports_stopped_by_sw_breakpoint
;
2851 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2852 ops
->to_supports_stopped_by_hw_breakpoint
2853 = record_btrace_supports_stopped_by_hw_breakpoint
;
2854 ops
->to_execution_direction
= record_btrace_execution_direction
;
2855 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2856 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2857 ops
->to_stratum
= record_stratum
;
2858 ops
->to_magic
= OPS_MAGIC
;
2861 /* Start recording in BTS format. */
2864 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2866 if (args
!= NULL
&& *args
!= 0)
2867 error (_("Invalid argument."));
2869 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2873 execute_command ("target record-btrace", from_tty
);
2875 CATCH (exception
, RETURN_MASK_ALL
)
2877 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2878 throw_exception (exception
);
2883 /* Start recording Intel(R) Processor Trace. */
2886 cmd_record_btrace_pt_start (char *args
, int from_tty
)
2888 if (args
!= NULL
&& *args
!= 0)
2889 error (_("Invalid argument."));
2891 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2895 execute_command ("target record-btrace", from_tty
);
2897 CATCH (exception
, RETURN_MASK_ALL
)
2899 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2900 throw_exception (exception
);
2905 /* Alias for "target record". */
2908 cmd_record_btrace_start (char *args
, int from_tty
)
2910 if (args
!= NULL
&& *args
!= 0)
2911 error (_("Invalid argument."));
2913 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2917 execute_command ("target record-btrace", from_tty
);
2919 CATCH (exception
, RETURN_MASK_ALL
)
2921 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2925 execute_command ("target record-btrace", from_tty
);
2927 CATCH (exception
, RETURN_MASK_ALL
)
2929 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2930 throw_exception (exception
);
2937 /* The "set record btrace" command. */
2940 cmd_set_record_btrace (char *args
, int from_tty
)
2942 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
2945 /* The "show record btrace" command. */
2948 cmd_show_record_btrace (char *args
, int from_tty
)
2950 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
2953 /* The "show record btrace replay-memory-access" command. */
2956 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2957 struct cmd_list_element
*c
, const char *value
)
2959 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2960 replay_memory_access
);
2963 /* The "set record btrace bts" command. */
2966 cmd_set_record_btrace_bts (char *args
, int from_tty
)
2968 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2969 "by an appropriate subcommand.\n"));
2970 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
2971 all_commands
, gdb_stdout
);
2974 /* The "show record btrace bts" command. */
2977 cmd_show_record_btrace_bts (char *args
, int from_tty
)
2979 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
2982 /* The "set record btrace pt" command. */
2985 cmd_set_record_btrace_pt (char *args
, int from_tty
)
2987 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2988 "by an appropriate subcommand.\n"));
2989 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
2990 all_commands
, gdb_stdout
);
2993 /* The "show record btrace pt" command. */
2996 cmd_show_record_btrace_pt (char *args
, int from_tty
)
2998 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  /* VALUE is the already-formatted setting string.  */
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  /* VALUE is the already-formatted setting string.  */
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3023 void _initialize_record_btrace (void);
3025 /* Initialize btrace commands. */
3028 _initialize_record_btrace (void)
3030 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3031 _("Start branch trace recording."), &record_btrace_cmdlist
,
3032 "record btrace ", 0, &record_cmdlist
);
3033 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3035 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3037 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3038 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3039 This format may not be available on all processors."),
3040 &record_btrace_cmdlist
);
3041 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3043 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3045 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
3046 This format may not be available on all processors."),
3047 &record_btrace_cmdlist
);
3048 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3050 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
3051 _("Set record options"), &set_record_btrace_cmdlist
,
3052 "set record btrace ", 0, &set_record_cmdlist
);
3054 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
3055 _("Show record options"), &show_record_btrace_cmdlist
,
3056 "show record btrace ", 0, &show_record_cmdlist
);
3058 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3059 replay_memory_access_types
, &replay_memory_access
, _("\
3060 Set what memory accesses are allowed during replay."), _("\
3061 Show what memory accesses are allowed during replay."),
3062 _("Default is READ-ONLY.\n\n\
3063 The btrace record target does not trace data.\n\
3064 The memory therefore corresponds to the live target and not \
3065 to the current replay position.\n\n\
3066 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3067 When READ-WRITE, allow accesses to read-only and read-write memory during \
3069 NULL
, cmd_show_replay_memory_access
,
3070 &set_record_btrace_cmdlist
,
3071 &show_record_btrace_cmdlist
);
3073 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
3074 _("Set record btrace bts options"),
3075 &set_record_btrace_bts_cmdlist
,
3076 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
3078 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
3079 _("Show record btrace bts options"),
3080 &show_record_btrace_bts_cmdlist
,
3081 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
3083 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3084 &record_btrace_conf
.bts
.size
,
3085 _("Set the record/replay bts buffer size."),
3086 _("Show the record/replay bts buffer size."), _("\
3087 When starting recording request a trace buffer of this size. \
3088 The actual buffer size may differ from the requested size. \
3089 Use \"info record\" to see the actual buffer size.\n\n\
3090 Bigger buffers allow longer recording but also take more time to process \
3091 the recorded execution trace.\n\n\
3092 The trace buffer size may not be changed while recording."), NULL
,
3093 show_record_bts_buffer_size_value
,
3094 &set_record_btrace_bts_cmdlist
,
3095 &show_record_btrace_bts_cmdlist
);
3097 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
3098 _("Set record btrace pt options"),
3099 &set_record_btrace_pt_cmdlist
,
3100 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
3102 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
3103 _("Show record btrace pt options"),
3104 &show_record_btrace_pt_cmdlist
,
3105 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
3107 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3108 &record_btrace_conf
.pt
.size
,
3109 _("Set the record/replay pt buffer size."),
3110 _("Show the record/replay pt buffer size."), _("\
3111 Bigger buffers allow longer recording but also take more time to process \
3112 the recorded execution.\n\
3113 The actual buffer size may differ from the requested size. Use \"info record\" \
3114 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3115 &set_record_btrace_pt_cmdlist
,
3116 &show_record_btrace_pt_cmdlist
);
3118 init_record_btrace_ops ();
3119 add_target (&record_btrace_ops
);
3121 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3124 record_btrace_conf
.bts
.size
= 64 * 1024;
3125 record_btrace_conf
.pt
.size
= 16 * 1024;