1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "event-loop.h"
/* NOTE(review): this extract is elided and reflowed by extraction (the
   original line numbers embedded in the text jump); every token below is
   kept verbatim.  File-scope state of the record-btrace target follows.  */
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops
;
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer
*record_btrace_thread_observer
;
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only
[] = "read-only";
52 static const char replay_memory_access_read_write
[] = "read-write";
53 static const char *const replay_memory_access_types
[] =
/* NOTE(review): the array's opening brace and trailing NULL sentinel
   appear elided by extraction here.  */
55 replay_memory_access_read_only
,
56 replay_memory_access_read_write
,
60 /* The currently allowed replay memory access type.
   Defaults to read-only; record_btrace_xfer_partial consults this to
   refuse writes while replaying.  */
61 static const char *replay_memory_access
= replay_memory_access_read_only
;
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element
*set_record_btrace_cmdlist
;
65 static struct cmd_list_element
*show_record_btrace_cmdlist
;
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile
;
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf
;
79 /* Command list for "record btrace". */
80 static struct cmd_list_element
*record_btrace_cmdlist
;
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
84 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
88 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
/* NOTE(review): the "do {" / "} while (0)" wrapper lines the comment above
   refers to appear elided by extraction; tokens kept verbatim.  */
93 #define DEBUG(msg, args...) \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
103 /* Update the branch trace for the current thread and return a pointer to its
106 Throws an error if there is no thread or no trace. This function never
/* NOTE(review): extract elided/reflowed; code kept verbatim.  Errors with
   "No thread." when inferior_ptid has no thread, and "No trace." when the
   thread's branch trace is empty.  */
109 static struct thread_info
*
110 require_btrace_thread (void)
112 struct thread_info
*tp
;
116 tp
= find_thread_ptid (inferior_ptid
);
/* NOTE(review): the NULL-check guarding this error appears elided.  */
118 error (_("No thread."));
120 validate_registers_access ();
/* NOTE(review): a btrace_fetch call and the final "return tp;" appear
   elided between these statements — confirm against upstream.  */
124 if (btrace_is_empty (tp
))
125 error (_("No trace."));
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
133 Throws an error if there is no thread or no trace. This function never
/* NOTE(review): extract elided/reflowed; code kept verbatim.  Delegates the
   thread/trace validation to require_btrace_thread; the "return &tp->btrace;"
   tail appears elided.  */
136 static struct btrace_thread_info
*
137 require_btrace (void)
139 struct thread_info
*tp
;
141 tp
= require_btrace_thread ();
146 /* Enable branch tracing for one thread. Warn on errors. */
/* NOTE(review): extract elided/reflowed; code kept verbatim.  The TRY arm
   that pairs with the CATCH below appears elided.  Errors from
   btrace_enable are downgraded to warnings so a failing thread does not
   abort enabling the others.  */
149 record_btrace_enable_warn (struct thread_info
*tp
)
153 btrace_enable (tp
, &record_btrace_conf
);
155 CATCH (error
, RETURN_MASK_ERROR
)
157 warning ("%s", error
.message
);
162 /* Callback function to disable branch tracing for one thread.
   Takes the thread via a void* ARG so it can be used as a cleanup
   callback (see record_btrace_open).  */
/* NOTE(review): the btrace_disable call appears elided after the cast;
   tokens kept verbatim.  */
165 record_btrace_disable_callback (void *arg
)
167 struct thread_info
*tp
= (struct thread_info
*) arg
;
172 /* Enable automatic tracing of new threads.
   Registers record_btrace_enable_warn as a new-thread observer and
   remembers the observer so record_btrace_auto_disable can detach it.  */
175 record_btrace_auto_enable (void)
177 DEBUG ("attach thread observer");
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn
);
183 /* Disable automatic tracing of new threads.
   Safe to call repeatedly: a NULL observer means there is nothing to
   detach.  */
186 record_btrace_auto_disable (void)
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer
== NULL
)
/* NOTE(review): the early "return;" for the NULL case appears elided.  */
192 DEBUG ("detach thread observer");
194 observer_detach_new_thread (record_btrace_thread_observer
);
195 record_btrace_thread_observer
= NULL
;
198 /* The record-btrace async event handler function.
   Forwards the async notification to the general inferior event loop;
   DATA is unused here.  */
201 record_btrace_handle_async_inferior_event (gdb_client_data data
)
203 inferior_event_handler (INF_REG_EVENT
, NULL
)
;
206 /* See record-btrace.h. */
/* NOTE(review): extract elided/reflowed; code kept verbatim.  Pushes the
   record-btrace target, creates its async event handler, clears the
   core-file flag and notifies observers of the record format in use.  */
209 record_btrace_push_target (void)
213 record_btrace_auto_enable ();
215 push_target (&record_btrace_ops
);
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
/* NOTE(review): the second create_async_event_handler argument (NULL)
   appears elided here.  */
220 record_btrace_generating_corefile
= 0;
222 format
= btrace_format_short_string (record_btrace_conf
.format
);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format
);
226 /* The to_open method of target record-btrace.
   ARGS optionally lists thread numbers to trace; with no ARGS all
   non-exited threads are enabled.  On any failure the cleanup chain
   disables tracing on the threads already enabled.  */
/* NOTE(review): extract elided/reflowed; code kept verbatim.  */
229 record_btrace_open (const char *args
, int from_tty
)
231 struct cleanup
*disable_chain
;
232 struct thread_info
*tp
;
238 if (!target_has_execution
)
239 error (_("The program is not being run."));
241 gdb_assert (record_btrace_thread_observer
== NULL
);
/* Start with a no-op cleanup so we can append one per enabled thread.  */
243 disable_chain
= make_cleanup (null_cleanup
, NULL
);
244 ALL_NON_EXITED_THREADS (tp
)
/* Empty/NULL ARGS means "all threads"; otherwise match the global
   thread number against the user-supplied list.  */
245 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
247 btrace_enable (tp
, &record_btrace_conf
);
249 make_cleanup (record_btrace_disable_callback
, tp
);
252 record_btrace_push_target ();
/* Success: keep tracing enabled on all selected threads.  */
254 discard_cleanups (disable_chain
);
257 /* The to_stop_recording method of target record-btrace.
   Stops auto-enabling new threads and disables tracing on each thread
   that is currently being traced.  */
/* NOTE(review): the btrace_disable call inside the loop appears elided;
   tokens kept verbatim.  */
260 record_btrace_stop_recording (struct target_ops
*self
)
262 struct thread_info
*tp
;
264 DEBUG ("stop recording");
266 record_btrace_auto_disable ();
268 ALL_NON_EXITED_THREADS (tp
)
/* Only threads with an active btrace target are being recorded.  */
269 if (tp
->btrace
.target
!= NULL
)
273 /* The to_disconnect method of target record-btrace.
   Pops this target without stopping recording, then forwards the
   disconnect request to the target beneath.  */
/* NOTE(review): the trailing "int from_tty" parameter line appears elided
   after the comma; tokens kept verbatim.  */
276 record_btrace_disconnect (struct target_ops
*self
, const char *args
,
279 struct target_ops
*beneath
= self
->beneath
;
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self
);
284 /* Forward disconnect. */
285 beneath
->to_disconnect (beneath
, args
, from_tty
);
288 /* The to_close method of target record-btrace.
   Deletes the async event handler (if created), detaches the new-thread
   observer and tears down per-thread btrace state.  */
291 record_btrace_close (struct target_ops
*self
)
293 struct thread_info
*tp
;
295 if (record_btrace_async_inferior_event_handler
!= NULL
)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp
)
305 btrace_teardown (tp
);
308 /* The to_async method of target record-btrace.
   Marks or clears our async event handler depending on ENABLE, then
   forwards the request to the target beneath.  */
/* NOTE(review): the "if (enable) ... else ..." framing around the two
   handler calls appears elided; tokens kept verbatim.  */
311 record_btrace_async (struct target_ops
*ops
, int enable
)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
316 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
318 ops
->beneath
->to_async (ops
->beneath
, enable
);
321 /* Adjusts the size and returns a human readable size suffix.
   Divides *SIZE down by 1024 at each matching power-of-two boundary;
   the masks test whether the value is an exact multiple of 1 GiB,
   1 MiB or 1 KiB respectively.  */
/* NOTE(review): the local "sz" setup, the divisions and the returned
   suffix strings appear elided; tokens kept verbatim.  */
324 record_btrace_adjust_size (unsigned int *size
)
330 if ((sz
& ((1u << 30) - 1)) == 0)
335 else if ((sz
& ((1u << 20) - 1)) == 0)
340 else if ((sz
& ((1u << 10) - 1)) == 0)
349 /* Print a BTS configuration.
   Prints the ring-buffer size with a human-readable suffix computed by
   record_btrace_adjust_size.  */
/* NOTE(review): the "size = conf->size;" setup and a zero-size guard
   appear elided; tokens kept verbatim.  */
352 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
360 suffix
= record_btrace_adjust_size (&size
);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
365 /* Print an Intel Processor Trace configuration.
   Mirrors record_btrace_print_bts_conf for the PT buffer size.  */
/* NOTE(review): the "size = conf->size;" setup and a zero-size guard
   appear elided; tokens kept verbatim.  */
368 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
376 suffix
= record_btrace_adjust_size (&size
);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
381 /* Print a branch tracing configuration. */
384 record_btrace_print_conf (const struct btrace_config
*conf
)
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf
->format
));
389 switch (conf
->format
)
391 case BTRACE_FORMAT_NONE
:
394 case BTRACE_FORMAT_BTS
:
395 record_btrace_print_bts_conf (&conf
->bts
);
398 case BTRACE_FORMAT_PT
:
399 record_btrace_print_pt_conf (&conf
->pt
);
403 internal_error (__FILE__
, __LINE__
, _("Unkown branch trace format."));
406 /* The to_info_record method of target record-btrace.
   Prints the recording configuration plus instruction/call/gap counts
   for the current thread, and the replay position if replaying.  */
/* NOTE(review): extract elided/reflowed (NULL-thread guard, braces and
   several statements missing); tokens kept verbatim.  */
409 record_btrace_info (struct target_ops
*self
)
411 struct btrace_thread_info
*btinfo
;
412 const struct btrace_config
*conf
;
413 struct thread_info
*tp
;
414 unsigned int insns
, calls
, gaps
;
418 tp
= find_thread_ptid (inferior_ptid
);
420 error (_("No thread."));
422 validate_registers_access ();
424 btinfo
= &tp
->btrace
;
426 conf
= btrace_conf (btinfo
);
428 record_btrace_print_conf (conf
);
/* NOTE(review): a btrace_fetch call and the zero-initialization of the
   counters appear elided before this emptiness check.  */
436 if (!btrace_is_empty (tp
))
438 struct btrace_call_iterator call
;
439 struct btrace_insn_iterator insn
;
441 btrace_call_end (&call
, btinfo
);
442 btrace_call_prev (&call
, 1);
443 calls
= btrace_call_number (&call
);
445 btrace_insn_end (&insn
, btinfo
);
447 insns
= btrace_insn_number (&insn
);
450 /* The last instruction does not really belong to the trace. */
457 /* Skip gaps at the end. */
460 steps
= btrace_insn_prev (&insn
, 1);
464 insns
= btrace_insn_number (&insn
);
469 gaps
= btinfo
->ngaps
;
472 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
473 "for thread %s (%s).\n"), insns
, calls
, gaps
,
474 print_thread_id (tp
), target_pid_to_str (tp
->ptid
));
476 if (btrace_is_replaying (tp
))
477 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
478 btrace_insn_number (btinfo
->replay
));
481 /* Print a decode error.
   Maps ERRCODE to a human-readable string depending on the trace FORMAT
   (BTS codes, or libipt codes when built with HAVE_LIBIPT), defaulting
   to "unknown", then emits "[decode error (N): STR]" via UIOUT.  */
/* NOTE(review): the enclosing switch statements, braces and break lines
   appear elided by extraction; tokens kept verbatim.  */
484 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
485 enum btrace_format format
)
490 errstr
= _("unknown");
498 case BTRACE_FORMAT_BTS
:
504 case BDE_BTS_OVERFLOW
:
505 errstr
= _("instruction overflow");
508 case BDE_BTS_INSN_SIZE
:
509 errstr
= _("unknown instruction");
514 #if defined (HAVE_LIBIPT)
515 case BTRACE_FORMAT_PT
:
518 case BDE_PT_USER_QUIT
:
520 errstr
= _("trace decode cancelled");
523 case BDE_PT_DISABLED
:
525 errstr
= _("disabled");
528 case BDE_PT_OVERFLOW
:
530 errstr
= _("overflow");
/* Negative libipt error codes are translated by libipt itself.  */
535 errstr
= pt_errstr (pt_errcode (errcode
));
539 #endif /* defined (HAVE_LIBIPT) */
542 uiout
->text (_("["));
545 uiout
->text (_("decode error ("));
546 uiout
->field_int ("errcode", errcode
);
547 uiout
->text (_("): "));
549 uiout
->text (errstr
);
550 uiout
->text (_("]\n"));
553 /* Print an unsigned int.
   Helper: emits VAL into field FLD of UIOUT using an unsigned format,
   since ui_out has no native unsigned field method.  */
556 ui_out_field_uint (struct ui_out
*uiout
, const char *fld
, unsigned int val
)
558 uiout
->field_fmt (fld
, "%u", val
);
561 /* A range of source lines.
   Half-open: BEGIN is the first line, END is one past the last line.
   An empty range has end <= begin (see btrace_line_range_is_empty).  */
563 struct btrace_line_range
565 /* The symtab this line is from. */
566 struct symtab
*symtab
;
568 /* The first line (inclusive). */
/* NOTE(review): the "int begin;" member line appears elided here.  */
571 /* The last line (exclusive). */
/* NOTE(review): the "int end;" member line appears elided here.  */
575 /* Construct a line range.
   Builds a btrace_line_range value from SYMTAB, BEGIN and END.  */
/* NOTE(review): the begin/end member assignments and the "return range;"
   tail appear elided; tokens kept verbatim.  */
577 static struct btrace_line_range
578 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
580 struct btrace_line_range range
;
582 range
.symtab
= symtab
;
589 /* Add a line to a line range.
   Grows RANGE (by value) so it covers LINE and returns the result:
   an empty range becomes the single line [LINE, LINE + 1); otherwise
   begin/end are extended as needed.  */
/* NOTE(review): the "range.begin = line;" first-entry assignment, the
   else-branch bodies and "return range;" appear elided; verbatim.  */
591 static struct btrace_line_range
592 btrace_line_range_add (struct btrace_line_range range
, int line
)
594 if (range
.end
<= range
.begin
)
596 /* This is the first entry. */
598 range
.end
= line
+ 1;
600 else if (line
< range
.begin
)
602 else if (range
.end
< line
)
608 /* Return non-zero if RANGE is empty, zero otherwise.
   Empty means the half-open interval [begin, end) contains no lines.  */
611 btrace_line_range_is_empty (struct btrace_line_range range
)
613 return range
.end
<= range
.begin
;
616 /* Return non-zero if LHS contains RHS, zero otherwise.
   Containment requires the same symtab and RHS's interval nested
   within LHS's.  */
619 btrace_line_range_contains_range (struct btrace_line_range lhs
,
620 struct btrace_line_range rhs
)
622 return ((lhs
.symtab
== rhs
.symtab
)
623 && (lhs
.begin
<= rhs
.begin
)
624 && (rhs
.end
<= lhs
.end
));
627 /* Find the line range associated with PC.
   Scans the symtab's linetable and accumulates every entry whose pc
   equals PC (ignoring line 0 markers) into a line range.  Returns an
   empty range (symtab possibly NULL) when no symtab/linetable exists.  */
/* NOTE(review): extract elided/reflowed (NULL guards before the early
   returns, the nlines<=0 check, and "return range;" are missing);
   tokens kept verbatim.  */
629 static struct btrace_line_range
630 btrace_find_line_range (CORE_ADDR pc
)
632 struct btrace_line_range range
;
633 struct linetable_entry
*lines
;
634 struct linetable
*ltable
;
635 struct symtab
*symtab
;
638 symtab
= find_pc_line_symtab (pc
);
640 return btrace_mk_line_range (NULL
, 0, 0);
642 ltable
= SYMTAB_LINETABLE (symtab
);
644 return btrace_mk_line_range (symtab
, 0, 0);
646 nlines
= ltable
->nitems
;
647 lines
= ltable
->item
;
649 return btrace_mk_line_range (symtab
, 0, 0);
651 range
= btrace_mk_line_range (symtab
, 0, 0);
652 for (i
= 0; i
< nlines
- 1; i
++)
/* Line number 0 marks "no source line" entries; skip them.  */
654 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0))
655 range
= btrace_line_range_add (range
, lines
[i
].line
);
661 /* Print source lines in LINES to UIOUT.
663 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
664 instructions corresponding to that source line. When printing a new source
665 line, we do the cleanups for the open chain and open a new cleanup chain for
666 the new source line. If the source line range in LINES is not empty, this
667 function will leave the cleanup chain for the last printed source line open
668 so instructions can be added to it. */
/* NOTE(review): extract elided/reflowed (the psl_flags initialization and
   the "*ui_item_chain =" left-hand side of the tuple cleanup are missing);
   tokens kept verbatim.  */
671 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
672 struct cleanup
**ui_item_chain
, int flags
)
674 print_source_lines_flags psl_flags
;
678 if (flags
& DISASSEMBLY_FILENAME
)
679 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
681 for (line
= lines
.begin
; line
< lines
.end
; ++line
)
/* Close the tuple/list opened for the previous source line.  */
683 if (*ui_item_chain
!= NULL
)
684 do_cleanups (*ui_item_chain
);
687 = make_cleanup_ui_out_tuple_begin_end (uiout
, "src_and_asm_line");
689 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
691 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
695 /* Disassemble a section of the recorded instruction trace.
   Walks [BEGIN, END) printing each instruction; gaps in the trace (NULL
   instructions) are rendered as decode errors; with DISASSEMBLY_SOURCE,
   interleaves source lines via btrace_print_lines.  */
/* NOTE(review): extract elided/reflowed (loop braces, the gap/else split
   and parts of the no-source tuple setup are missing); tokens verbatim.  */
698 btrace_insn_history (struct ui_out
*uiout
,
699 const struct btrace_thread_info
*btinfo
,
700 const struct btrace_insn_iterator
*begin
,
701 const struct btrace_insn_iterator
*end
, int flags
)
703 struct cleanup
*cleanups
, *ui_item_chain
;
704 struct gdbarch
*gdbarch
;
705 struct btrace_insn_iterator it
;
706 struct btrace_line_range last_lines
;
708 DEBUG ("itrace (0x%x): [%u; %u)", flags
, btrace_insn_number (begin
),
709 btrace_insn_number (end
));
/* Speculatively-executed instructions are always marked in the output.  */
711 flags
|= DISASSEMBLY_SPECULATIVE
;
713 gdbarch
= target_gdbarch ();
714 last_lines
= btrace_mk_line_range (NULL
, 0, 0);
716 cleanups
= make_cleanup_ui_out_list_begin_end (uiout
, "asm_insns");
718 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
719 instructions corresponding to that line. */
720 ui_item_chain
= NULL
;
722 for (it
= *begin
; btrace_insn_cmp (&it
, end
) != 0; btrace_insn_next (&it
, 1))
724 const struct btrace_insn
*insn
;
726 insn
= btrace_insn_get (&it
);
728 /* A NULL instruction indicates a gap in the trace. */
731 const struct btrace_config
*conf
;
733 conf
= btrace_conf (btinfo
);
735 /* We have trace so we must have a configuration. */
736 gdb_assert (conf
!= NULL
);
738 btrace_ui_out_decode_error (uiout
, it
.function
->errcode
,
743 struct disasm_insn dinsn
;
745 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
747 struct btrace_line_range lines
;
749 lines
= btrace_find_line_range (insn
->pc
);
/* Only print source when the line range changed and is not already
   covered by the previously printed range.  */
750 if (!btrace_line_range_is_empty (lines
)
751 && !btrace_line_range_contains_range (last_lines
, lines
))
753 btrace_print_lines (lines
, uiout
, &ui_item_chain
, flags
);
756 else if (ui_item_chain
== NULL
)
759 = make_cleanup_ui_out_tuple_begin_end (uiout
,
761 /* No source information. */
762 make_cleanup_ui_out_list_begin_end (uiout
, "line_asm_insn");
765 gdb_assert (ui_item_chain
!= NULL
);
768 memset (&dinsn
, 0, sizeof (dinsn
));
769 dinsn
.number
= btrace_insn_number (&it
);
770 dinsn
.addr
= insn
->pc
;
772 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
773 dinsn
.is_speculative
= 1;
775 gdb_pretty_print_insn (gdbarch
, uiout
, &dinsn
, flags
);
779 do_cleanups (cleanups
);
782 /* The to_insn_history method of target record-btrace.
   Prints SIZE instructions relative to the last shown position: with no
   previous history, starts at the replay position (or trace tail) and
   expands in the requested direction, filling leftover context in the
   other direction; otherwise continues before/after the previous window
   depending on the sign of SIZE.  */
/* NOTE(review): extract elided/reflowed (the size==0 guard, the replay
   != NULL branch copying the replay iterator into BEGIN, and the
   direction if/else framing are missing); tokens kept verbatim.  */
785 record_btrace_insn_history (struct target_ops
*self
, int size
, int flags
)
787 struct btrace_thread_info
*btinfo
;
788 struct btrace_insn_history
*history
;
789 struct btrace_insn_iterator begin
, end
;
790 struct cleanup
*uiout_cleanup
;
791 struct ui_out
*uiout
;
792 unsigned int context
, covered
;
794 uiout
= current_uiout
;
795 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
797 context
= abs (size
);
799 error (_("Bad record instruction-history-size."));
801 btinfo
= require_btrace ();
802 history
= btinfo
->insn_history
;
805 struct btrace_insn_iterator
*replay
;
807 DEBUG ("insn-history (0x%x): %d", flags
, size
);
809 /* If we're replaying, we start at the replay position. Otherwise, we
810 start at the tail of the trace. */
811 replay
= btinfo
->replay
;
815 btrace_insn_end (&begin
, btinfo
);
817 /* We start from here and expand in the requested direction. Then we
818 expand in the other direction, as well, to fill up any remaining
823 /* We want the current position covered, as well. */
824 covered
= btrace_insn_next (&end
, 1);
825 covered
+= btrace_insn_prev (&begin
, context
- covered
);
826 covered
+= btrace_insn_next (&end
, context
- covered
);
830 covered
= btrace_insn_next (&end
, context
);
831 covered
+= btrace_insn_prev (&begin
, context
- covered
);
836 begin
= history
->begin
;
839 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags
, size
,
840 btrace_insn_number (&begin
), btrace_insn_number (&end
));
845 covered
= btrace_insn_prev (&begin
, context
);
850 covered
= btrace_insn_next (&end
, context
);
855 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
/* Nothing covered means we ran off one end of the recorded trace.  */
859 printf_unfiltered (_("At the start of the branch trace record.\n"));
861 printf_unfiltered (_("At the end of the branch trace record.\n"));
864 btrace_set_insn_history (btinfo
, &begin
, &end
);
865 do_cleanups (uiout_cleanup
);
868 /* The to_insn_history_range method of target record-btrace.
   Prints instructions numbered [FROM, TO], validating that the 64-bit
   arguments fit in the unsigned int instruction numbers and that the
   range is ordered; an out-of-bounds upper end is silently truncated.  */
/* NOTE(review): extract elided/reflowed (the low/high assignments, the
   from > to check and the found/!found branches are missing); verbatim.  */
871 record_btrace_insn_history_range (struct target_ops
*self
,
872 ULONGEST from
, ULONGEST to
, int flags
)
874 struct btrace_thread_info
*btinfo
;
875 struct btrace_insn_history
*history
;
876 struct btrace_insn_iterator begin
, end
;
877 struct cleanup
*uiout_cleanup
;
878 struct ui_out
*uiout
;
879 unsigned int low
, high
;
882 uiout
= current_uiout
;
883 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
888 DEBUG ("insn-history (0x%x): [%u; %u)", flags
, low
, high
);
890 /* Check for wrap-arounds. */
891 if (low
!= from
|| high
!= to
)
892 error (_("Bad range."));
895 error (_("Bad range."));
897 btinfo
= require_btrace ();
899 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
901 error (_("Range out of bounds."));
903 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
906 /* Silently truncate the range. */
907 btrace_insn_end (&end
, btinfo
);
911 /* We want both begin and end to be inclusive. */
912 btrace_insn_next (&end
, 1);
915 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
916 btrace_set_insn_history (btinfo
, &begin
, &end
);
918 do_cleanups (uiout_cleanup
);
921 /* The to_insn_history_from method of target record-btrace.
   Prints SIZE instructions centered on FROM by computing an inclusive
   [begin, end] window and delegating to the range method.  Underflow and
   overflow of the window are clamped (elided here).  */
/* NOTE(review): extract elided/reflowed (size==0 guard, size sign split
   and the wrap-around clamps are missing); tokens kept verbatim.  */
924 record_btrace_insn_history_from (struct target_ops
*self
,
925 ULONGEST from
, int size
, int flags
)
927 ULONGEST begin
, end
, context
;
929 context
= abs (size
);
931 error (_("Bad record instruction-history-size."));
940 begin
= from
- context
+ 1;
945 end
= from
+ context
- 1;
947 /* Check for wrap-around. */
952 record_btrace_insn_history_range (self
, begin
, end
, flags
);
955 /* Print the instruction number range for a function call history line.
   The range is [insn_offset, insn_offset + count - 1], inclusive, emitted
   as the "insn begin" / "insn end" fields separated by the text between
   them (the separator line appears elided here).  */
958 btrace_call_history_insn_range (struct ui_out
*uiout
,
959 const struct btrace_function
*bfun
)
961 unsigned int begin
, end
, size
;
963 size
= VEC_length (btrace_insn_s
, bfun
->insn
);
/* A function segment always contains at least one instruction.  */
964 gdb_assert (size
> 0);
966 begin
= bfun
->insn_offset
;
967 end
= begin
+ size
- 1;
969 ui_out_field_uint (uiout
, "insn begin", begin
);
971 ui_out_field_uint (uiout
, "insn end", end
);
974 /* Compute the lowest and highest source line for the instructions in BFUN
975 and return them in PBEGIN and PEND.
976 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
977 result from inlining or macro expansion. */
/* NOTE(review): extract elided/reflowed (local begin/end initialization,
   the sym NULL guard, the loop's "continue" for mismatched sals, and the
   final *pbegin/*pend stores are missing); tokens kept verbatim.  */
980 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
981 int *pbegin
, int *pend
)
983 struct btrace_insn
*insn
;
984 struct symtab
*symtab
;
996 symtab
= symbol_symtab (sym
);
998 for (idx
= 0; VEC_iterate (btrace_insn_s
, bfun
->insn
, idx
, insn
); ++idx
)
1000 struct symtab_and_line sal
;
1002 sal
= find_pc_line (insn
->pc
, 0);
/* Skip instructions whose line belongs to a different symtab (inlining /
   macro expansion) or has no line number.  */
1003 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
1006 begin
= std::min (begin
, sal
.line
);
1007 end
= std::max (end
, sal
.line
);
1015 /* Print the source line information for a function call history line.
   Emits the file name plus the min/max source line of BFUN's
   instructions; intermediate separators and the single-line special case
   appear elided here.  */
/* NOTE(review): the sym lookup and NULL guard before the field_string call
   are missing from this extract; tokens kept verbatim.  */
1018 btrace_call_history_src_line (struct ui_out
*uiout
,
1019 const struct btrace_function
*bfun
)
1028 uiout
->field_string ("file",
1029 symtab_to_filename_for_display (symbol_symtab (sym
)));
1031 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1036 uiout
->field_int ("min line", begin
);
1042 uiout
->field_int ("max line", end
);
1045 /* Get the name of a branch trace function.
   Prefers the full symbol's print name, falls back to the minimal
   symbol's, and (in the elided tail) presumably returns a placeholder
   when neither exists — TODO confirm against upstream.  */
/* NOTE(review): the sym/msym initialization from BFUN and the leading
   "if (sym != NULL)" appear elided; tokens kept verbatim.  */
1048 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1050 struct minimal_symbol
*msym
;
1060 return SYMBOL_PRINT_NAME (sym
);
1061 else if (msym
!= NULL
)
1062 return MSYMBOL_PRINT_NAME (msym
);
1067 /* Disassemble a section of the recorded function trace.
   Walks [BEGIN, END) printing one line per function segment: index,
   optional gap/decode error, optional call-depth indentation, the
   function name, and optional instruction-range and source-line info
   depending on FLAGS.  */
/* NOTE(review): extract elided/reflowed (the trailing int_flags parameter,
   loop braces, sym/msym lookups and several uiout->text separators are
   missing); tokens kept verbatim.  */
1070 btrace_call_history (struct ui_out
*uiout
,
1071 const struct btrace_thread_info
*btinfo
,
1072 const struct btrace_call_iterator
*begin
,
1073 const struct btrace_call_iterator
*end
,
1076 struct btrace_call_iterator it
;
1077 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1079 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1080 btrace_call_number (end
));
1082 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1084 const struct btrace_function
*bfun
;
1085 struct minimal_symbol
*msym
;
1088 bfun
= btrace_call_get (&it
);
1092 /* Print the function index. */
1093 ui_out_field_uint (uiout
, "index", bfun
->number
);
1096 /* Indicate gaps in the trace. */
1097 if (bfun
->errcode
!= 0)
1099 const struct btrace_config
*conf
;
1101 conf
= btrace_conf (btinfo
);
1103 /* We have trace so we must have a configuration. */
1104 gdb_assert (conf
!= NULL
);
1106 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
/* Indent by the segment's call depth relative to the thread's base
   level when RECORD_PRINT_INDENT_CALLS is requested.  */
1111 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1113 int level
= bfun
->level
+ btinfo
->level
, i
;
1115 for (i
= 0; i
< level
; ++i
)
1120 uiout
->field_string ("function", SYMBOL_PRINT_NAME (sym
));
1121 else if (msym
!= NULL
)
1122 uiout
->field_string ("function", MSYMBOL_PRINT_NAME (msym
));
1123 else if (!uiout
->is_mi_like_p ())
1124 uiout
->field_string ("function", "??");
1126 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1128 uiout
->text (_("\tinst "));
1129 btrace_call_history_insn_range (uiout
, bfun
);
1132 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1134 uiout
->text (_("\tat "));
1135 btrace_call_history_src_line (uiout
, bfun
);
1142 /* The to_call_history method of target record-btrace.
   Call-level counterpart of record_btrace_insn_history: prints SIZE call
   segments, starting from the replay position or trace tail when no
   previous call history exists, otherwise continuing before/after the
   previous window depending on the sign of SIZE.  */
/* NOTE(review): extract elided/reflowed (size==0 guard, the replay !=
   NULL branch framing, direction if/else and insn-clearing of begin
   are missing); tokens kept verbatim.  */
1145 record_btrace_call_history (struct target_ops
*self
, int size
, int int_flags
)
1147 struct btrace_thread_info
*btinfo
;
1148 struct btrace_call_history
*history
;
1149 struct btrace_call_iterator begin
, end
;
1150 struct cleanup
*uiout_cleanup
;
1151 struct ui_out
*uiout
;
1152 unsigned int context
, covered
;
1153 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1155 uiout
= current_uiout
;
1156 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1158 context
= abs (size
);
1160 error (_("Bad record function-call-history-size."));
1162 btinfo
= require_btrace ();
1163 history
= btinfo
->call_history
;
1164 if (history
== NULL
)
1166 struct btrace_insn_iterator
*replay
;
1168 DEBUG ("call-history (0x%x): %d", int_flags
, size
);
1170 /* If we're replaying, we start at the replay position. Otherwise, we
1171 start at the tail of the trace. */
1172 replay
= btinfo
->replay
;
1175 begin
.function
= replay
->function
;
1176 begin
.btinfo
= btinfo
;
1179 btrace_call_end (&begin
, btinfo
);
1181 /* We start from here and expand in the requested direction. Then we
1182 expand in the other direction, as well, to fill up any remaining
1187 /* We want the current position covered, as well. */
1188 covered
= btrace_call_next (&end
, 1);
1189 covered
+= btrace_call_prev (&begin
, context
- covered
);
1190 covered
+= btrace_call_next (&end
, context
- covered
);
1194 covered
= btrace_call_next (&end
, context
);
1195 covered
+= btrace_call_prev (&begin
, context
- covered
);
1200 begin
= history
->begin
;
1203 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags
, size
,
1204 btrace_call_number (&begin
), btrace_call_number (&end
));
1209 covered
= btrace_call_prev (&begin
, context
);
1214 covered
= btrace_call_next (&end
, context
);
1219 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
/* Nothing covered means we ran off one end of the recorded trace.  */
1223 printf_unfiltered (_("At the start of the branch trace record.\n"));
1225 printf_unfiltered (_("At the end of the branch trace record.\n"));
1228 btrace_set_call_history (btinfo
, &begin
, &end
);
1229 do_cleanups (uiout_cleanup
);
1232 /* The to_call_history_range method of target record-btrace.
   Prints function-call segments numbered [FROM, TO], mirroring
   record_btrace_insn_history_range: validates the narrowing to unsigned
   int and the ordering, and silently truncates an out-of-bounds end.  */
/* NOTE(review): extract elided/reflowed (the trailing int_flags parameter,
   low/high assignments, from > to check and found/!found branches are
   missing); tokens kept verbatim.  */
1235 record_btrace_call_history_range (struct target_ops
*self
,
1236 ULONGEST from
, ULONGEST to
,
1239 struct btrace_thread_info
*btinfo
;
1240 struct btrace_call_history
*history
;
1241 struct btrace_call_iterator begin
, end
;
1242 struct cleanup
*uiout_cleanup
;
1243 struct ui_out
*uiout
;
1244 unsigned int low
, high
;
1246 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1248 uiout
= current_uiout
;
1249 uiout_cleanup
= make_cleanup_ui_out_tuple_begin_end (uiout
,
1254 DEBUG ("call-history (0x%x): [%u; %u)", int_flags
, low
, high
);
1256 /* Check for wrap-arounds. */
1257 if (low
!= from
|| high
!= to
)
1258 error (_("Bad range."));
1261 error (_("Bad range."));
1263 btinfo
= require_btrace ();
1265 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1267 error (_("Range out of bounds."));
1269 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1272 /* Silently truncate the range. */
1273 btrace_call_end (&end
, btinfo
);
1277 /* We want both begin and end to be inclusive. */
1278 btrace_call_next (&end
, 1);
1281 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1282 btrace_set_call_history (btinfo
, &begin
, &end
);
1284 do_cleanups (uiout_cleanup
);
1287 /* The to_call_history_from method of target record-btrace.
   Prints SIZE call segments centered on FROM by computing an inclusive
   [begin, end] window and delegating to the range method; underflow and
   overflow clamps appear elided in this extract.  */
/* NOTE(review): the trailing int_flags parameter, the size==0 guard and
   the sign split are missing from this extract; tokens kept verbatim.  */
1290 record_btrace_call_history_from (struct target_ops
*self
,
1291 ULONGEST from
, int size
,
1294 ULONGEST begin
, end
, context
;
1295 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1297 context
= abs (size
);
1299 error (_("Bad record function-call-history-size."));
1308 begin
= from
- context
+ 1;
1313 end
= from
+ context
- 1;
1315 /* Check for wrap-around. */
1320 record_btrace_call_history_range (self
, begin
, end
, flags
);
1323 /* The to_record_is_replaying method of target record-btrace.
   Returns non-zero if any non-exited thread matching PTID is currently
   replaying its branch trace.  */
/* NOTE(review): the "return 1;" / "return 0;" tails of the loop appear
   elided; tokens kept verbatim.  */
1326 record_btrace_is_replaying (struct target_ops
*self
, ptid_t ptid
)
1328 struct thread_info
*tp
;
1330 ALL_NON_EXITED_THREADS (tp
)
1331 if (ptid_match (tp
->ptid
, ptid
) && btrace_is_replaying (tp
))
1337 /* The to_record_will_replay method of target record-btrace.
   Executing in reverse always replays; forward execution replays only
   if the matching thread is already replaying.  */
1340 record_btrace_will_replay (struct target_ops
*self
, ptid_t ptid
, int dir
)
1342 return dir
== EXEC_REVERSE
|| record_btrace_is_replaying (self
, ptid
);
1345 /* The to_xfer_partial method of target record-btrace.
   While replaying with the read-only access policy (and not writing a
   core file), memory writes are refused and reads are restricted to
   read-only sections; everything else is forwarded beneath.  */
/* NOTE(review): extract elided/reflowed (the switch framing, the throw_error
   vs. UNAVAILABLE split for writes, and the "ops = ops->beneath;" step
   before the forward are missing); tokens kept verbatim.  */
1347 static enum target_xfer_status
1348 record_btrace_xfer_partial (struct target_ops
*ops
, enum target_object object
,
1349 const char *annex
, gdb_byte
*readbuf
,
1350 const gdb_byte
*writebuf
, ULONGEST offset
,
1351 ULONGEST len
, ULONGEST
*xfered_len
)
1353 struct target_ops
*t
;
1355 /* Filter out requests that don't make sense during replay. */
1356 if (replay_memory_access
== replay_memory_access_read_only
1357 && !record_btrace_generating_corefile
1358 && record_btrace_is_replaying (ops
, inferior_ptid
))
1362 case TARGET_OBJECT_MEMORY
:
1364 struct target_section
*section
;
1366 /* We do not allow writing memory in general. */
1367 if (writebuf
!= NULL
)
1370 return TARGET_XFER_UNAVAILABLE
;
1373 /* We allow reading readonly memory. */
1374 section
= target_section_by_addr (ops
, offset
);
1375 if (section
!= NULL
)
1377 /* Check if the section we found is readonly. */
1378 if ((bfd_get_section_flags (section
->the_bfd_section
->owner
,
1379 section
->the_bfd_section
)
1380 & SEC_READONLY
) != 0)
1382 /* Truncate the request to fit into this section. */
1383 len
= std::min (len
, section
->endaddr
- offset
);
1389 return TARGET_XFER_UNAVAILABLE
;
1394 /* Forward the request. */
1396 return ops
->to_xfer_partial (ops
, object
, annex
, readbuf
, writebuf
,
1397 offset
, len
, xfered_len
);
1400 /* The to_insert_breakpoint method of target record-btrace.
   Temporarily widens replay_memory_access to read-write so the target
   beneath can patch memory; the CATCH arm restores the saved policy
   before rethrowing so the global is never left writable.  */
/* NOTE(review): the declarations of "old"/"ret", the TRY arm pairing with
   CATCH, and the final "return ret;" appear elided; tokens verbatim.  */
1403 record_btrace_insert_breakpoint (struct target_ops
*ops
,
1404 struct gdbarch
*gdbarch
,
1405 struct bp_target_info
*bp_tgt
)
1410 /* Inserting breakpoints requires accessing memory. Allow it for the
1411 duration of this function. */
1412 old
= replay_memory_access
;
1413 replay_memory_access
= replay_memory_access_read_write
;
1418 ret
= ops
->beneath
->to_insert_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
);
1420 CATCH (except
, RETURN_MASK_ALL
)
1422 replay_memory_access
= old
;
1423 throw_exception (except
);
/* Normal path: restore the saved access policy.  */
1426 replay_memory_access
= old
;
1431 /* The to_remove_breakpoint method of target record-btrace.
   Same save/widen/restore pattern as record_btrace_insert_breakpoint:
   replay_memory_access is set to read-write around the forwarded call
   and restored on both the normal and exception paths.  */
/* NOTE(review): the declarations of "old"/"ret", the REASON argument line
   after the trailing comma, the TRY arm and the final "return ret;"
   appear elided; tokens kept verbatim.  */
1434 record_btrace_remove_breakpoint (struct target_ops
*ops
,
1435 struct gdbarch
*gdbarch
,
1436 struct bp_target_info
*bp_tgt
,
1437 enum remove_bp_reason reason
)
1442 /* Removing breakpoints requires accessing memory. Allow it for the
1443 duration of this function. */
1444 old
= replay_memory_access
;
1445 replay_memory_access
= replay_memory_access_read_write
;
1450 ret
= ops
->beneath
->to_remove_breakpoint (ops
->beneath
, gdbarch
, bp_tgt
,
1453 CATCH (except
, RETURN_MASK_ALL
)
1455 replay_memory_access
= old
;
1456 throw_exception (except
);
/* Normal path: restore the saved access policy.  */
1459 replay_memory_access
= old
;
1464 /* The to_fetch_registers method of target record-btrace.
   While replaying (and not writing a core file), only the PC register
   can be supplied — it comes from the current replay instruction; all
   other register requests fall through to the target beneath.  */
/* NOTE(review): extract elided/reflowed (the pcreg declaration, an error
   for non-PC requests, and the else framing of the beneath path are
   missing); tokens kept verbatim.  */
1467 record_btrace_fetch_registers (struct target_ops
*ops
,
1468 struct regcache
*regcache
, int regno
)
1470 struct btrace_insn_iterator
*replay
;
1471 struct thread_info
*tp
;
1473 tp
= find_thread_ptid (inferior_ptid
);
1474 gdb_assert (tp
!= NULL
);
1476 replay
= tp
->btrace
.replay
;
1477 if (replay
!= NULL
&& !record_btrace_generating_corefile
)
1479 const struct btrace_insn
*insn
;
1480 struct gdbarch
*gdbarch
;
1483 gdbarch
= get_regcache_arch (regcache
);
1484 pcreg
= gdbarch_pc_regnum (gdbarch
);
1488 /* We can only provide the PC register. */
1489 if (regno
>= 0 && regno
!= pcreg
)
1492 insn
= btrace_insn_get (replay
);
1493 gdb_assert (insn
!= NULL
);
/* Supply the replayed instruction's address as the PC value.  */
1495 regcache_raw_supply (regcache
, regno
, &insn
->pc
);
1499 struct target_ops
*t
= ops
->beneath
;
1501 t
->to_fetch_registers (t
, regcache
, regno
);
1505 /* The to_store_registers method of target record-btrace.
   Register writes are refused while replaying (unless generating a core
   file); otherwise the request is forwarded to the target beneath.  */
/* NOTE(review): the assignment "t = ops->beneath;" before the forwarded
   call appears elided; tokens kept verbatim.  */
1508 record_btrace_store_registers (struct target_ops
*ops
,
1509 struct regcache
*regcache
, int regno
)
1511 struct target_ops
*t
;
1513 if (!record_btrace_generating_corefile
1514 && record_btrace_is_replaying (ops
, inferior_ptid
))
1515 error (_("Cannot write registers while replaying."));
1517 gdb_assert (may_write_registers
!= 0);
1520 t
->to_store_registers (t
, regcache
, regno
);
1523 /* The to_prepare_to_store method of target record-btrace. */
1526 record_btrace_prepare_to_store (struct target_ops
*ops
,
1527 struct regcache
*regcache
)
1529 struct target_ops
*t
;
1531 if (!record_btrace_generating_corefile
1532 && record_btrace_is_replaying (ops
, inferior_ptid
))
1536 t
->to_prepare_to_store (t
, regcache
);
1539 /* The branch trace frame cache. */
1541 struct btrace_frame_cache
1544 struct thread_info
*tp
;
1546 /* The frame info. */
1547 struct frame_info
*frame
;
1549 /* The branch trace function segment. */
1550 const struct btrace_function
*bfun
;
1553 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1555 static htab_t bfcache
;
1557 /* hash_f for htab_create_alloc of bfcache. */
1560 bfcache_hash (const void *arg
)
1562 const struct btrace_frame_cache
*cache
1563 = (const struct btrace_frame_cache
*) arg
;
1565 return htab_hash_pointer (cache
->frame
);
1568 /* eq_f for htab_create_alloc of bfcache. */
1571 bfcache_eq (const void *arg1
, const void *arg2
)
1573 const struct btrace_frame_cache
*cache1
1574 = (const struct btrace_frame_cache
*) arg1
;
1575 const struct btrace_frame_cache
*cache2
1576 = (const struct btrace_frame_cache
*) arg2
;
1578 return cache1
->frame
== cache2
->frame
;
1581 /* Create a new btrace frame cache. */
1583 static struct btrace_frame_cache
*
1584 bfcache_new (struct frame_info
*frame
)
1586 struct btrace_frame_cache
*cache
;
1589 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1590 cache
->frame
= frame
;
1592 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1593 gdb_assert (*slot
== NULL
);
1599 /* Extract the branch trace function from a branch trace frame. */
1601 static const struct btrace_function
*
1602 btrace_get_frame_function (struct frame_info
*frame
)
1604 const struct btrace_frame_cache
*cache
;
1605 const struct btrace_function
*bfun
;
1606 struct btrace_frame_cache pattern
;
1609 pattern
.frame
= frame
;
1611 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1615 cache
= (const struct btrace_frame_cache
*) *slot
;
1619 /* Implement stop_reason method for record_btrace_frame_unwind. */
1621 static enum unwind_stop_reason
1622 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1625 const struct btrace_frame_cache
*cache
;
1626 const struct btrace_function
*bfun
;
1628 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1630 gdb_assert (bfun
!= NULL
);
1632 if (bfun
->up
== NULL
)
1633 return UNWIND_UNAVAILABLE
;
1635 return UNWIND_NO_REASON
;
1638 /* Implement this_id method for record_btrace_frame_unwind. */
1641 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1642 struct frame_id
*this_id
)
1644 const struct btrace_frame_cache
*cache
;
1645 const struct btrace_function
*bfun
;
1646 CORE_ADDR code
, special
;
1648 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1651 gdb_assert (bfun
!= NULL
);
1653 while (bfun
->segment
.prev
!= NULL
)
1654 bfun
= bfun
->segment
.prev
;
1656 code
= get_frame_func (this_frame
);
1657 special
= bfun
->number
;
1659 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1661 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1662 btrace_get_bfun_name (cache
->bfun
),
1663 core_addr_to_string_nz (this_id
->code_addr
),
1664 core_addr_to_string_nz (this_id
->special_addr
));
1667 /* Implement prev_register method for record_btrace_frame_unwind. */
1669 static struct value
*
1670 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1674 const struct btrace_frame_cache
*cache
;
1675 const struct btrace_function
*bfun
, *caller
;
1676 const struct btrace_insn
*insn
;
1677 struct gdbarch
*gdbarch
;
1681 gdbarch
= get_frame_arch (this_frame
);
1682 pcreg
= gdbarch_pc_regnum (gdbarch
);
1683 if (pcreg
< 0 || regnum
!= pcreg
)
1684 throw_error (NOT_AVAILABLE_ERROR
,
1685 _("Registers are not available in btrace record history"));
1687 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1689 gdb_assert (bfun
!= NULL
);
1693 throw_error (NOT_AVAILABLE_ERROR
,
1694 _("No caller in btrace record history"));
1696 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1698 insn
= VEC_index (btrace_insn_s
, caller
->insn
, 0);
1703 insn
= VEC_last (btrace_insn_s
, caller
->insn
);
1706 pc
+= gdb_insn_length (gdbarch
, pc
);
1709 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1710 btrace_get_bfun_name (bfun
), bfun
->level
,
1711 core_addr_to_string_nz (pc
));
1713 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1716 /* Implement sniffer method for record_btrace_frame_unwind. */
1719 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1720 struct frame_info
*this_frame
,
1723 const struct btrace_function
*bfun
;
1724 struct btrace_frame_cache
*cache
;
1725 struct thread_info
*tp
;
1726 struct frame_info
*next
;
1728 /* THIS_FRAME does not contain a reference to its thread. */
1729 tp
= find_thread_ptid (inferior_ptid
);
1730 gdb_assert (tp
!= NULL
);
1733 next
= get_next_frame (this_frame
);
1736 const struct btrace_insn_iterator
*replay
;
1738 replay
= tp
->btrace
.replay
;
1740 bfun
= replay
->function
;
1744 const struct btrace_function
*callee
;
1746 callee
= btrace_get_frame_function (next
);
1747 if (callee
!= NULL
&& (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1754 DEBUG ("[frame] sniffed frame for %s on level %d",
1755 btrace_get_bfun_name (bfun
), bfun
->level
);
1757 /* This is our frame. Initialize the frame cache. */
1758 cache
= bfcache_new (this_frame
);
1762 *this_cache
= cache
;
1766 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1769 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1770 struct frame_info
*this_frame
,
1773 const struct btrace_function
*bfun
, *callee
;
1774 struct btrace_frame_cache
*cache
;
1775 struct frame_info
*next
;
1777 next
= get_next_frame (this_frame
);
1781 callee
= btrace_get_frame_function (next
);
1785 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1792 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1793 btrace_get_bfun_name (bfun
), bfun
->level
);
1795 /* This is our frame. Initialize the frame cache. */
1796 cache
= bfcache_new (this_frame
);
1797 cache
->tp
= find_thread_ptid (inferior_ptid
);
1800 *this_cache
= cache
;
1805 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1807 struct btrace_frame_cache
*cache
;
1810 cache
= (struct btrace_frame_cache
*) this_cache
;
1812 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1813 gdb_assert (slot
!= NULL
);
1815 htab_remove_elt (bfcache
, cache
);
1818 /* btrace recording does not store previous memory content, neither the stack
1819 frames content. Any unwinding would return errorneous results as the stack
1820 contents no longer matches the changed PC value restored from history.
1821 Therefore this unwinder reports any possibly unwound registers as
1824 const struct frame_unwind record_btrace_frame_unwind
=
1827 record_btrace_frame_unwind_stop_reason
,
1828 record_btrace_frame_this_id
,
1829 record_btrace_frame_prev_register
,
1831 record_btrace_frame_sniffer
,
1832 record_btrace_frame_dealloc_cache
1835 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1838 record_btrace_frame_unwind_stop_reason
,
1839 record_btrace_frame_this_id
,
1840 record_btrace_frame_prev_register
,
1842 record_btrace_tailcall_frame_sniffer
,
1843 record_btrace_frame_dealloc_cache
1846 /* Implement the to_get_unwinder method. */
1848 static const struct frame_unwind
*
1849 record_btrace_to_get_unwinder (struct target_ops
*self
)
1851 return &record_btrace_frame_unwind
;
1854 /* Implement the to_get_tailcall_unwinder method. */
1856 static const struct frame_unwind
*
1857 record_btrace_to_get_tailcall_unwinder (struct target_ops
*self
)
1859 return &record_btrace_tailcall_frame_unwind
;
1862 /* Return a human-readable string for FLAG. */
1865 btrace_thread_flag_to_str (enum btrace_thread_flag flag
)
1873 return "reverse-step";
1879 return "reverse-cont";
1888 /* Indicate that TP should be resumed according to FLAG. */
1891 record_btrace_resume_thread (struct thread_info
*tp
,
1892 enum btrace_thread_flag flag
)
1894 struct btrace_thread_info
*btinfo
;
1896 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1897 target_pid_to_str (tp
->ptid
), flag
, btrace_thread_flag_to_str (flag
));
1899 btinfo
= &tp
->btrace
;
1901 /* Fetch the latest branch trace. */
1904 /* A resume request overwrites a preceding resume or stop request. */
1905 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1906 btinfo
->flags
|= flag
;
1909 /* Get the current frame for TP. */
1911 static struct frame_info
*
1912 get_thread_current_frame (struct thread_info
*tp
)
1914 struct frame_info
*frame
;
1915 ptid_t old_inferior_ptid
;
1918 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1919 old_inferior_ptid
= inferior_ptid
;
1920 inferior_ptid
= tp
->ptid
;
1922 /* Clear the executing flag to allow changes to the current frame.
1923 We are not actually running, yet. We just started a reverse execution
1924 command or a record goto command.
1925 For the latter, EXECUTING is false and this has no effect.
1926 For the former, EXECUTING is true and we're in to_wait, about to
1927 move the thread. Since we need to recompute the stack, we temporarily
1928 set EXECUTING to flase. */
1929 executing
= is_executing (inferior_ptid
);
1930 set_executing (inferior_ptid
, 0);
1935 frame
= get_current_frame ();
1937 CATCH (except
, RETURN_MASK_ALL
)
1939 /* Restore the previous execution state. */
1940 set_executing (inferior_ptid
, executing
);
1942 /* Restore the previous inferior_ptid. */
1943 inferior_ptid
= old_inferior_ptid
;
1945 throw_exception (except
);
1949 /* Restore the previous execution state. */
1950 set_executing (inferior_ptid
, executing
);
1952 /* Restore the previous inferior_ptid. */
1953 inferior_ptid
= old_inferior_ptid
;
1958 /* Start replaying a thread. */
1960 static struct btrace_insn_iterator
*
1961 record_btrace_start_replaying (struct thread_info
*tp
)
1963 struct btrace_insn_iterator
*replay
;
1964 struct btrace_thread_info
*btinfo
;
1966 btinfo
= &tp
->btrace
;
1969 /* We can't start replaying without trace. */
1970 if (btinfo
->begin
== NULL
)
1973 /* GDB stores the current frame_id when stepping in order to detects steps
1975 Since frames are computed differently when we're replaying, we need to
1976 recompute those stored frames and fix them up so we can still detect
1977 subroutines after we started replaying. */
1980 struct frame_info
*frame
;
1981 struct frame_id frame_id
;
1982 int upd_step_frame_id
, upd_step_stack_frame_id
;
1984 /* The current frame without replaying - computed via normal unwind. */
1985 frame
= get_thread_current_frame (tp
);
1986 frame_id
= get_frame_id (frame
);
1988 /* Check if we need to update any stepping-related frame id's. */
1989 upd_step_frame_id
= frame_id_eq (frame_id
,
1990 tp
->control
.step_frame_id
);
1991 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
1992 tp
->control
.step_stack_frame_id
);
1994 /* We start replaying at the end of the branch trace. This corresponds
1995 to the current instruction. */
1996 replay
= XNEW (struct btrace_insn_iterator
);
1997 btrace_insn_end (replay
, btinfo
);
1999 /* Skip gaps at the end of the trace. */
2000 while (btrace_insn_get (replay
) == NULL
)
2004 steps
= btrace_insn_prev (replay
, 1);
2006 error (_("No trace."));
2009 /* We're not replaying, yet. */
2010 gdb_assert (btinfo
->replay
== NULL
);
2011 btinfo
->replay
= replay
;
2013 /* Make sure we're not using any stale registers. */
2014 registers_changed_ptid (tp
->ptid
);
2016 /* The current frame with replaying - computed via btrace unwind. */
2017 frame
= get_thread_current_frame (tp
);
2018 frame_id
= get_frame_id (frame
);
2020 /* Replace stepping related frames where necessary. */
2021 if (upd_step_frame_id
)
2022 tp
->control
.step_frame_id
= frame_id
;
2023 if (upd_step_stack_frame_id
)
2024 tp
->control
.step_stack_frame_id
= frame_id
;
2026 CATCH (except
, RETURN_MASK_ALL
)
2028 xfree (btinfo
->replay
);
2029 btinfo
->replay
= NULL
;
2031 registers_changed_ptid (tp
->ptid
);
2033 throw_exception (except
);
2040 /* Stop replaying a thread. */
2043 record_btrace_stop_replaying (struct thread_info
*tp
)
2045 struct btrace_thread_info
*btinfo
;
2047 btinfo
= &tp
->btrace
;
2049 xfree (btinfo
->replay
);
2050 btinfo
->replay
= NULL
;
2052 /* Make sure we're not leaving any stale registers. */
2053 registers_changed_ptid (tp
->ptid
);
2056 /* Stop replaying TP if it is at the end of its execution history. */
2059 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2061 struct btrace_insn_iterator
*replay
, end
;
2062 struct btrace_thread_info
*btinfo
;
2064 btinfo
= &tp
->btrace
;
2065 replay
= btinfo
->replay
;
2070 btrace_insn_end (&end
, btinfo
);
2072 if (btrace_insn_cmp (replay
, &end
) == 0)
2073 record_btrace_stop_replaying (tp
);
2076 /* The to_resume method of target record-btrace. */
2079 record_btrace_resume (struct target_ops
*ops
, ptid_t ptid
, int step
,
2080 enum gdb_signal signal
)
2082 struct thread_info
*tp
;
2083 enum btrace_thread_flag flag
, cflag
;
2085 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
),
2086 execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2087 step
? "step" : "cont");
2089 /* Store the execution direction of the last resume.
2091 If there is more than one to_resume call, we have to rely on infrun
2092 to not change the execution direction in-between. */
2093 record_btrace_resume_exec_dir
= execution_direction
;
2095 /* As long as we're not replaying, just forward the request.
2097 For non-stop targets this means that no thread is replaying. In order to
2098 make progress, we may need to explicitly move replaying threads to the end
2099 of their execution history. */
2100 if ((execution_direction
!= EXEC_REVERSE
)
2101 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2104 ops
->to_resume (ops
, ptid
, step
, signal
);
2108 /* Compute the btrace thread flag for the requested move. */
2109 if (execution_direction
== EXEC_REVERSE
)
2111 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2116 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2120 /* We just indicate the resume intent here. The actual stepping happens in
2121 record_btrace_wait below.
2123 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2124 if (!target_is_non_stop_p ())
2126 gdb_assert (ptid_match (inferior_ptid
, ptid
));
2128 ALL_NON_EXITED_THREADS (tp
)
2129 if (ptid_match (tp
->ptid
, ptid
))
2131 if (ptid_match (tp
->ptid
, inferior_ptid
))
2132 record_btrace_resume_thread (tp
, flag
);
2134 record_btrace_resume_thread (tp
, cflag
);
2139 ALL_NON_EXITED_THREADS (tp
)
2140 if (ptid_match (tp
->ptid
, ptid
))
2141 record_btrace_resume_thread (tp
, flag
);
2144 /* Async support. */
2145 if (target_can_async_p ())
2148 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2152 /* The to_commit_resume method of target record-btrace. */
2155 record_btrace_commit_resume (struct target_ops
*ops
)
2157 if ((execution_direction
!= EXEC_REVERSE
)
2158 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2159 ops
->beneath
->to_commit_resume (ops
->beneath
);
2162 /* Cancel resuming TP. */
2165 record_btrace_cancel_resume (struct thread_info
*tp
)
2167 enum btrace_thread_flag flags
;
2169 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2173 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2174 print_thread_id (tp
),
2175 target_pid_to_str (tp
->ptid
), flags
,
2176 btrace_thread_flag_to_str (flags
));
2178 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2179 record_btrace_stop_replaying_at_end (tp
);
2182 /* Return a target_waitstatus indicating that we ran out of history. */
2184 static struct target_waitstatus
2185 btrace_step_no_history (void)
2187 struct target_waitstatus status
;
2189 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2194 /* Return a target_waitstatus indicating that a step finished. */
2196 static struct target_waitstatus
2197 btrace_step_stopped (void)
2199 struct target_waitstatus status
;
2201 status
.kind
= TARGET_WAITKIND_STOPPED
;
2202 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2207 /* Return a target_waitstatus indicating that a thread was stopped as
2210 static struct target_waitstatus
2211 btrace_step_stopped_on_request (void)
2213 struct target_waitstatus status
;
2215 status
.kind
= TARGET_WAITKIND_STOPPED
;
2216 status
.value
.sig
= GDB_SIGNAL_0
;
2221 /* Return a target_waitstatus indicating a spurious stop. */
2223 static struct target_waitstatus
2224 btrace_step_spurious (void)
2226 struct target_waitstatus status
;
2228 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2233 /* Return a target_waitstatus indicating that the thread was not resumed. */
2235 static struct target_waitstatus
2236 btrace_step_no_resumed (void)
2238 struct target_waitstatus status
;
2240 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2245 /* Return a target_waitstatus indicating that we should wait again. */
2247 static struct target_waitstatus
2248 btrace_step_again (void)
2250 struct target_waitstatus status
;
2252 status
.kind
= TARGET_WAITKIND_IGNORE
;
2257 /* Clear the record histories. */
2260 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2262 xfree (btinfo
->insn_history
);
2263 xfree (btinfo
->call_history
);
2265 btinfo
->insn_history
= NULL
;
2266 btinfo
->call_history
= NULL
;
2269 /* Check whether TP's current replay position is at a breakpoint. */
2272 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2274 struct btrace_insn_iterator
*replay
;
2275 struct btrace_thread_info
*btinfo
;
2276 const struct btrace_insn
*insn
;
2277 struct inferior
*inf
;
2279 btinfo
= &tp
->btrace
;
2280 replay
= btinfo
->replay
;
2285 insn
= btrace_insn_get (replay
);
2289 inf
= find_inferior_ptid (tp
->ptid
);
2293 return record_check_stopped_by_breakpoint (inf
->aspace
, insn
->pc
,
2294 &btinfo
->stop_reason
);
2297 /* Step one instruction in forward direction. */
2299 static struct target_waitstatus
2300 record_btrace_single_step_forward (struct thread_info
*tp
)
2302 struct btrace_insn_iterator
*replay
, end
, start
;
2303 struct btrace_thread_info
*btinfo
;
2305 btinfo
= &tp
->btrace
;
2306 replay
= btinfo
->replay
;
2308 /* We're done if we're not replaying. */
2310 return btrace_step_no_history ();
2312 /* Check if we're stepping a breakpoint. */
2313 if (record_btrace_replay_at_breakpoint (tp
))
2314 return btrace_step_stopped ();
2316 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2317 jump back to the instruction at which we started. */
2323 /* We will bail out here if we continue stepping after reaching the end
2324 of the execution history. */
2325 steps
= btrace_insn_next (replay
, 1);
2329 return btrace_step_no_history ();
2332 while (btrace_insn_get (replay
) == NULL
);
2334 /* Determine the end of the instruction trace. */
2335 btrace_insn_end (&end
, btinfo
);
2337 /* The execution trace contains (and ends with) the current instruction.
2338 This instruction has not been executed, yet, so the trace really ends
2339 one instruction earlier. */
2340 if (btrace_insn_cmp (replay
, &end
) == 0)
2341 return btrace_step_no_history ();
2343 return btrace_step_spurious ();
2346 /* Step one instruction in backward direction. */
2348 static struct target_waitstatus
2349 record_btrace_single_step_backward (struct thread_info
*tp
)
2351 struct btrace_insn_iterator
*replay
, start
;
2352 struct btrace_thread_info
*btinfo
;
2354 btinfo
= &tp
->btrace
;
2355 replay
= btinfo
->replay
;
2357 /* Start replaying if we're not already doing so. */
2359 replay
= record_btrace_start_replaying (tp
);
2361 /* If we can't step any further, we reached the end of the history.
2362 Skip gaps during replay. If we end up at a gap (at the beginning of
2363 the trace), jump back to the instruction at which we started. */
2369 steps
= btrace_insn_prev (replay
, 1);
2373 return btrace_step_no_history ();
2376 while (btrace_insn_get (replay
) == NULL
);
2378 /* Check if we're stepping a breakpoint.
2380 For reverse-stepping, this check is after the step. There is logic in
2381 infrun.c that handles reverse-stepping separately. See, for example,
2382 proceed and adjust_pc_after_break.
2384 This code assumes that for reverse-stepping, PC points to the last
2385 de-executed instruction, whereas for forward-stepping PC points to the
2386 next to-be-executed instruction. */
2387 if (record_btrace_replay_at_breakpoint (tp
))
2388 return btrace_step_stopped ();
2390 return btrace_step_spurious ();
2393 /* Step a single thread. */
2395 static struct target_waitstatus
2396 record_btrace_step_thread (struct thread_info
*tp
)
2398 struct btrace_thread_info
*btinfo
;
2399 struct target_waitstatus status
;
2400 enum btrace_thread_flag flags
;
2402 btinfo
= &tp
->btrace
;
2404 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2405 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2407 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2408 target_pid_to_str (tp
->ptid
), flags
,
2409 btrace_thread_flag_to_str (flags
));
2411 /* We can't step without an execution history. */
2412 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2413 return btrace_step_no_history ();
2418 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2421 return btrace_step_stopped_on_request ();
2424 status
= record_btrace_single_step_forward (tp
);
2425 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2428 return btrace_step_stopped ();
2431 status
= record_btrace_single_step_backward (tp
);
2432 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2435 return btrace_step_stopped ();
2438 status
= record_btrace_single_step_forward (tp
);
2439 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2442 btinfo
->flags
|= flags
;
2443 return btrace_step_again ();
2446 status
= record_btrace_single_step_backward (tp
);
2447 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2450 btinfo
->flags
|= flags
;
2451 return btrace_step_again ();
2454 /* We keep threads moving at the end of their execution history. The to_wait
2455 method will stop the thread for whom the event is reported. */
2456 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2457 btinfo
->flags
|= flags
;
/* A vector of threads.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2467 /* Announce further events if necessary. */
2470 record_btrace_maybe_mark_async_event (const VEC (tp_t
) *moving
,
2471 const VEC (tp_t
) *no_history
)
2473 int more_moving
, more_no_history
;
2475 more_moving
= !VEC_empty (tp_t
, moving
);
2476 more_no_history
= !VEC_empty (tp_t
, no_history
);
2478 if (!more_moving
&& !more_no_history
)
2482 DEBUG ("movers pending");
2484 if (more_no_history
)
2485 DEBUG ("no-history pending");
2487 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2490 /* The to_wait method of target record-btrace. */
2493 record_btrace_wait (struct target_ops
*ops
, ptid_t ptid
,
2494 struct target_waitstatus
*status
, int options
)
2496 VEC (tp_t
) *moving
, *no_history
;
2497 struct thread_info
*tp
, *eventing
;
2498 struct cleanup
*cleanups
= make_cleanup (null_cleanup
, NULL
);
2500 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
), options
);
2502 /* As long as we're not replaying, just forward the request. */
2503 if ((execution_direction
!= EXEC_REVERSE
)
2504 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2507 return ops
->to_wait (ops
, ptid
, status
, options
);
2513 make_cleanup (VEC_cleanup (tp_t
), &moving
);
2514 make_cleanup (VEC_cleanup (tp_t
), &no_history
);
2516 /* Keep a work list of moving threads. */
2517 ALL_NON_EXITED_THREADS (tp
)
2518 if (ptid_match (tp
->ptid
, ptid
)
2519 && ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0))
2520 VEC_safe_push (tp_t
, moving
, tp
);
2522 if (VEC_empty (tp_t
, moving
))
2524 *status
= btrace_step_no_resumed ();
2526 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
),
2527 target_waitstatus_to_string (status
));
2529 do_cleanups (cleanups
);
2533 /* Step moving threads one by one, one step each, until either one thread
2534 reports an event or we run out of threads to step.
2536 When stepping more than one thread, chances are that some threads reach
2537 the end of their execution history earlier than others. If we reported
2538 this immediately, all-stop on top of non-stop would stop all threads and
2539 resume the same threads next time. And we would report the same thread
2540 having reached the end of its execution history again.
2542 In the worst case, this would starve the other threads. But even if other
2543 threads would be allowed to make progress, this would result in far too
2544 many intermediate stops.
2546 We therefore delay the reporting of "no execution history" until we have
2547 nothing else to report. By this time, all threads should have moved to
2548 either the beginning or the end of their execution history. There will
2549 be a single user-visible stop. */
2551 while ((eventing
== NULL
) && !VEC_empty (tp_t
, moving
))
2556 while ((eventing
== NULL
) && VEC_iterate (tp_t
, moving
, ix
, tp
))
2558 *status
= record_btrace_step_thread (tp
);
2560 switch (status
->kind
)
2562 case TARGET_WAITKIND_IGNORE
:
2566 case TARGET_WAITKIND_NO_HISTORY
:
2567 VEC_safe_push (tp_t
, no_history
,
2568 VEC_ordered_remove (tp_t
, moving
, ix
));
2572 eventing
= VEC_unordered_remove (tp_t
, moving
, ix
);
2578 if (eventing
== NULL
)
2580 /* We started with at least one moving thread. This thread must have
2581 either stopped or reached the end of its execution history.
2583 In the former case, EVENTING must not be NULL.
2584 In the latter case, NO_HISTORY must not be empty. */
2585 gdb_assert (!VEC_empty (tp_t
, no_history
));
2587 /* We kept threads moving at the end of their execution history. Stop
2588 EVENTING now that we are going to report its stop. */
2589 eventing
= VEC_unordered_remove (tp_t
, no_history
, 0);
2590 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2592 *status
= btrace_step_no_history ();
2595 gdb_assert (eventing
!= NULL
);
2597 /* We kept threads replaying at the end of their execution history. Stop
2598 replaying EVENTING now that we are going to report its stop. */
2599 record_btrace_stop_replaying_at_end (eventing
);
2601 /* Stop all other threads. */
2602 if (!target_is_non_stop_p ())
2603 ALL_NON_EXITED_THREADS (tp
)
2604 record_btrace_cancel_resume (tp
);
2606 /* In async mode, we need to announce further events. */
2607 if (target_is_async_p ())
2608 record_btrace_maybe_mark_async_event (moving
, no_history
);
2610 /* Start record histories anew from the current position. */
2611 record_btrace_clear_histories (&eventing
->btrace
);
2613 /* We moved the replay position but did not update registers. */
2614 registers_changed_ptid (eventing
->ptid
);
2616 DEBUG ("wait ended by thread %s (%s): %s",
2617 print_thread_id (eventing
),
2618 target_pid_to_str (eventing
->ptid
),
2619 target_waitstatus_to_string (status
));
2621 do_cleanups (cleanups
);
2622 return eventing
->ptid
;
2625 /* The to_stop method of target record-btrace. */
2628 record_btrace_stop (struct target_ops
*ops
, ptid_t ptid
)
2630 DEBUG ("stop %s", target_pid_to_str (ptid
));
2632 /* As long as we're not replaying, just forward the request. */
2633 if ((execution_direction
!= EXEC_REVERSE
)
2634 && !record_btrace_is_replaying (ops
, minus_one_ptid
))
2637 ops
->to_stop (ops
, ptid
);
2641 struct thread_info
*tp
;
2643 ALL_NON_EXITED_THREADS (tp
)
2644 if (ptid_match (tp
->ptid
, ptid
))
2646 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2647 tp
->btrace
.flags
|= BTHR_STOP
;
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2660 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2663 record_btrace_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2665 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2667 struct thread_info
*tp
= inferior_thread ();
2669 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2672 return ops
->beneath
->to_stopped_by_sw_breakpoint (ops
->beneath
);
2675 /* The to_supports_stopped_by_sw_breakpoint method of target
2679 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops
*ops
)
2681 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2684 return ops
->beneath
->to_supports_stopped_by_sw_breakpoint (ops
->beneath
);
2687 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2690 record_btrace_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2692 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2694 struct thread_info
*tp
= inferior_thread ();
2696 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_HW_BREAKPOINT
;
2699 return ops
->beneath
->to_stopped_by_hw_breakpoint (ops
->beneath
);
2702 /* The to_supports_stopped_by_hw_breakpoint method of target
2706 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops
*ops
)
2708 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2711 return ops
->beneath
->to_supports_stopped_by_hw_breakpoint (ops
->beneath
);
2714 /* The to_update_thread_list method of target record-btrace. */
2717 record_btrace_update_thread_list (struct target_ops
*ops
)
2719 /* We don't add or remove threads during replay. */
2720 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2723 /* Forward the request. */
2725 ops
->to_update_thread_list (ops
);
2728 /* The to_thread_alive method of target record-btrace. */
2731 record_btrace_thread_alive (struct target_ops
*ops
, ptid_t ptid
)
2733 /* We don't add or remove threads during replay. */
2734 if (record_btrace_is_replaying (ops
, minus_one_ptid
))
2735 return find_thread_ptid (ptid
) != NULL
;
2737 /* Forward the request. */
2739 return ops
->to_thread_alive (ops
, ptid
);
2742 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2746 record_btrace_set_replay (struct thread_info
*tp
,
2747 const struct btrace_insn_iterator
*it
)
2749 struct btrace_thread_info
*btinfo
;
2751 btinfo
= &tp
->btrace
;
2753 if (it
== NULL
|| it
->function
== NULL
)
2754 record_btrace_stop_replaying (tp
);
2757 if (btinfo
->replay
== NULL
)
2758 record_btrace_start_replaying (tp
);
2759 else if (btrace_insn_cmp (btinfo
->replay
, it
) == 0)
2762 *btinfo
->replay
= *it
;
2763 registers_changed_ptid (tp
->ptid
);
2766 /* Start anew from the new replay position. */
2767 record_btrace_clear_histories (btinfo
);
2769 stop_pc
= regcache_read_pc (get_current_regcache ());
2770 print_stack_frame (get_selected_frame (NULL
), 1, SRC_AND_LOC
, 1);
2773 /* The to_goto_record_begin method of target record-btrace. */
2776 record_btrace_goto_begin (struct target_ops
*self
)
2778 struct thread_info
*tp
;
2779 struct btrace_insn_iterator begin
;
2781 tp
= require_btrace_thread ();
2783 btrace_insn_begin (&begin
, &tp
->btrace
);
2785 /* Skip gaps at the beginning of the trace. */
2786 while (btrace_insn_get (&begin
) == NULL
)
2790 steps
= btrace_insn_next (&begin
, 1);
2792 error (_("No trace."));
2795 record_btrace_set_replay (tp
, &begin
);
2798 /* The to_goto_record_end method of target record-btrace. */
2801 record_btrace_goto_end (struct target_ops
*ops
)
2803 struct thread_info
*tp
;
2805 tp
= require_btrace_thread ();
2807 record_btrace_set_replay (tp
, NULL
);
2810 /* The to_goto_record method of target record-btrace. */
2813 record_btrace_goto (struct target_ops
*self
, ULONGEST insn
)
2815 struct thread_info
*tp
;
2816 struct btrace_insn_iterator it
;
2817 unsigned int number
;
2822 /* Check for wrap-arounds. */
2824 error (_("Instruction number out of range."));
2826 tp
= require_btrace_thread ();
2828 found
= btrace_find_insn_by_number (&it
, &tp
->btrace
, number
);
2830 error (_("No such instruction."));
2832 record_btrace_set_replay (tp
, &it
);
2835 /* The to_record_stop_replaying method of target record-btrace. */
2838 record_btrace_stop_replaying_all (struct target_ops
*self
)
2840 struct thread_info
*tp
;
2842 ALL_NON_EXITED_THREADS (tp
)
2843 record_btrace_stop_replaying (tp
);
2846 /* The to_execution_direction target method. */
2848 static enum exec_direction_kind
2849 record_btrace_execution_direction (struct target_ops
*self
)
2851 return record_btrace_resume_exec_dir
;
2854 /* The to_prepare_to_generate_core target method. */
2857 record_btrace_prepare_to_generate_core (struct target_ops
*self
)
2859 record_btrace_generating_corefile
= 1;
2862 /* The to_done_generating_core target method. */
2865 record_btrace_done_generating_core (struct target_ops
*self
)
2867 record_btrace_generating_corefile
= 0;
2870 /* Initialize the record-btrace target ops. */
2873 init_record_btrace_ops (void)
2875 struct target_ops
*ops
;
2877 ops
= &record_btrace_ops
;
2878 ops
->to_shortname
= "record-btrace";
2879 ops
->to_longname
= "Branch tracing target";
2880 ops
->to_doc
= "Collect control-flow trace and provide the execution history.";
2881 ops
->to_open
= record_btrace_open
;
2882 ops
->to_close
= record_btrace_close
;
2883 ops
->to_async
= record_btrace_async
;
2884 ops
->to_detach
= record_detach
;
2885 ops
->to_disconnect
= record_btrace_disconnect
;
2886 ops
->to_mourn_inferior
= record_mourn_inferior
;
2887 ops
->to_kill
= record_kill
;
2888 ops
->to_stop_recording
= record_btrace_stop_recording
;
2889 ops
->to_info_record
= record_btrace_info
;
2890 ops
->to_insn_history
= record_btrace_insn_history
;
2891 ops
->to_insn_history_from
= record_btrace_insn_history_from
;
2892 ops
->to_insn_history_range
= record_btrace_insn_history_range
;
2893 ops
->to_call_history
= record_btrace_call_history
;
2894 ops
->to_call_history_from
= record_btrace_call_history_from
;
2895 ops
->to_call_history_range
= record_btrace_call_history_range
;
2896 ops
->to_record_is_replaying
= record_btrace_is_replaying
;
2897 ops
->to_record_will_replay
= record_btrace_will_replay
;
2898 ops
->to_record_stop_replaying
= record_btrace_stop_replaying_all
;
2899 ops
->to_xfer_partial
= record_btrace_xfer_partial
;
2900 ops
->to_remove_breakpoint
= record_btrace_remove_breakpoint
;
2901 ops
->to_insert_breakpoint
= record_btrace_insert_breakpoint
;
2902 ops
->to_fetch_registers
= record_btrace_fetch_registers
;
2903 ops
->to_store_registers
= record_btrace_store_registers
;
2904 ops
->to_prepare_to_store
= record_btrace_prepare_to_store
;
2905 ops
->to_get_unwinder
= &record_btrace_to_get_unwinder
;
2906 ops
->to_get_tailcall_unwinder
= &record_btrace_to_get_tailcall_unwinder
;
2907 ops
->to_resume
= record_btrace_resume
;
2908 ops
->to_commit_resume
= record_btrace_commit_resume
;
2909 ops
->to_wait
= record_btrace_wait
;
2910 ops
->to_stop
= record_btrace_stop
;
2911 ops
->to_update_thread_list
= record_btrace_update_thread_list
;
2912 ops
->to_thread_alive
= record_btrace_thread_alive
;
2913 ops
->to_goto_record_begin
= record_btrace_goto_begin
;
2914 ops
->to_goto_record_end
= record_btrace_goto_end
;
2915 ops
->to_goto_record
= record_btrace_goto
;
2916 ops
->to_can_execute_reverse
= record_btrace_can_execute_reverse
;
2917 ops
->to_stopped_by_sw_breakpoint
= record_btrace_stopped_by_sw_breakpoint
;
2918 ops
->to_supports_stopped_by_sw_breakpoint
2919 = record_btrace_supports_stopped_by_sw_breakpoint
;
2920 ops
->to_stopped_by_hw_breakpoint
= record_btrace_stopped_by_hw_breakpoint
;
2921 ops
->to_supports_stopped_by_hw_breakpoint
2922 = record_btrace_supports_stopped_by_hw_breakpoint
;
2923 ops
->to_execution_direction
= record_btrace_execution_direction
;
2924 ops
->to_prepare_to_generate_core
= record_btrace_prepare_to_generate_core
;
2925 ops
->to_done_generating_core
= record_btrace_done_generating_core
;
2926 ops
->to_stratum
= record_stratum
;
2927 ops
->to_magic
= OPS_MAGIC
;
2930 /* Start recording in BTS format. */
2933 cmd_record_btrace_bts_start (char *args
, int from_tty
)
2935 if (args
!= NULL
&& *args
!= 0)
2936 error (_("Invalid argument."));
2938 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2942 execute_command ("target record-btrace", from_tty
);
2944 CATCH (exception
, RETURN_MASK_ALL
)
2946 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2947 throw_exception (exception
);
2952 /* Start recording in Intel Processor Trace format. */
2955 cmd_record_btrace_pt_start (char *args
, int from_tty
)
2957 if (args
!= NULL
&& *args
!= 0)
2958 error (_("Invalid argument."));
2960 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2964 execute_command ("target record-btrace", from_tty
);
2966 CATCH (exception
, RETURN_MASK_ALL
)
2968 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2969 throw_exception (exception
);
2974 /* Alias for "target record". */
2977 cmd_record_btrace_start (char *args
, int from_tty
)
2979 if (args
!= NULL
&& *args
!= 0)
2980 error (_("Invalid argument."));
2982 record_btrace_conf
.format
= BTRACE_FORMAT_PT
;
2986 execute_command ("target record-btrace", from_tty
);
2988 CATCH (exception
, RETURN_MASK_ALL
)
2990 record_btrace_conf
.format
= BTRACE_FORMAT_BTS
;
2994 execute_command ("target record-btrace", from_tty
);
2996 CATCH (exception
, RETURN_MASK_ALL
)
2998 record_btrace_conf
.format
= BTRACE_FORMAT_NONE
;
2999 throw_exception (exception
);
3006 /* The "set record btrace" command. */
3009 cmd_set_record_btrace (char *args
, int from_tty
)
3011 cmd_show_list (set_record_btrace_cmdlist
, from_tty
, "");
3014 /* The "show record btrace" command. */
3017 cmd_show_record_btrace (char *args
, int from_tty
)
3019 cmd_show_list (show_record_btrace_cmdlist
, from_tty
, "");
3022 /* The "show record btrace replay-memory-access" command. */
3025 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
3026 struct cmd_list_element
*c
, const char *value
)
3028 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
3029 replay_memory_access
);
3032 /* The "set record btrace bts" command. */
3035 cmd_set_record_btrace_bts (char *args
, int from_tty
)
3037 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3038 "by an appropriate subcommand.\n"));
3039 help_list (set_record_btrace_bts_cmdlist
, "set record btrace bts ",
3040 all_commands
, gdb_stdout
);
3043 /* The "show record btrace bts" command. */
3046 cmd_show_record_btrace_bts (char *args
, int from_tty
)
3048 cmd_show_list (show_record_btrace_bts_cmdlist
, from_tty
, "");
3051 /* The "set record btrace pt" command. */
3054 cmd_set_record_btrace_pt (char *args
, int from_tty
)
3056 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3057 "by an appropriate subcommand.\n"));
3058 help_list (set_record_btrace_pt_cmdlist
, "set record btrace pt ",
3059 all_commands
, gdb_stdout
);
3062 /* The "show record btrace pt" command. */
3065 cmd_show_record_btrace_pt (char *args
, int from_tty
)
3067 cmd_show_list (show_record_btrace_pt_cmdlist
, from_tty
, "");
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
                                   struct cmd_list_element *c,
                                   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
                    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
                                  struct cmd_list_element *c,
                                  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
                    value);
}
3092 void _initialize_record_btrace (void);
3094 /* Initialize btrace commands. */
3097 _initialize_record_btrace (void)
3099 add_prefix_cmd ("btrace", class_obscure
, cmd_record_btrace_start
,
3100 _("Start branch trace recording."), &record_btrace_cmdlist
,
3101 "record btrace ", 0, &record_cmdlist
);
3102 add_alias_cmd ("b", "btrace", class_obscure
, 1, &record_cmdlist
);
3104 add_cmd ("bts", class_obscure
, cmd_record_btrace_bts_start
,
3106 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3107 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3108 This format may not be available on all processors."),
3109 &record_btrace_cmdlist
);
3110 add_alias_cmd ("bts", "btrace bts", class_obscure
, 1, &record_cmdlist
);
3112 add_cmd ("pt", class_obscure
, cmd_record_btrace_pt_start
,
3114 Start branch trace recording in Intel Processor Trace format.\n\n\
3115 This format may not be available on all processors."),
3116 &record_btrace_cmdlist
);
3117 add_alias_cmd ("pt", "btrace pt", class_obscure
, 1, &record_cmdlist
);
3119 add_prefix_cmd ("btrace", class_support
, cmd_set_record_btrace
,
3120 _("Set record options"), &set_record_btrace_cmdlist
,
3121 "set record btrace ", 0, &set_record_cmdlist
);
3123 add_prefix_cmd ("btrace", class_support
, cmd_show_record_btrace
,
3124 _("Show record options"), &show_record_btrace_cmdlist
,
3125 "show record btrace ", 0, &show_record_cmdlist
);
3127 add_setshow_enum_cmd ("replay-memory-access", no_class
,
3128 replay_memory_access_types
, &replay_memory_access
, _("\
3129 Set what memory accesses are allowed during replay."), _("\
3130 Show what memory accesses are allowed during replay."),
3131 _("Default is READ-ONLY.\n\n\
3132 The btrace record target does not trace data.\n\
3133 The memory therefore corresponds to the live target and not \
3134 to the current replay position.\n\n\
3135 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3136 When READ-WRITE, allow accesses to read-only and read-write memory during \
3138 NULL
, cmd_show_replay_memory_access
,
3139 &set_record_btrace_cmdlist
,
3140 &show_record_btrace_cmdlist
);
3142 add_prefix_cmd ("bts", class_support
, cmd_set_record_btrace_bts
,
3143 _("Set record btrace bts options"),
3144 &set_record_btrace_bts_cmdlist
,
3145 "set record btrace bts ", 0, &set_record_btrace_cmdlist
);
3147 add_prefix_cmd ("bts", class_support
, cmd_show_record_btrace_bts
,
3148 _("Show record btrace bts options"),
3149 &show_record_btrace_bts_cmdlist
,
3150 "show record btrace bts ", 0, &show_record_btrace_cmdlist
);
3152 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3153 &record_btrace_conf
.bts
.size
,
3154 _("Set the record/replay bts buffer size."),
3155 _("Show the record/replay bts buffer size."), _("\
3156 When starting recording request a trace buffer of this size. \
3157 The actual buffer size may differ from the requested size. \
3158 Use \"info record\" to see the actual buffer size.\n\n\
3159 Bigger buffers allow longer recording but also take more time to process \
3160 the recorded execution trace.\n\n\
3161 The trace buffer size may not be changed while recording."), NULL
,
3162 show_record_bts_buffer_size_value
,
3163 &set_record_btrace_bts_cmdlist
,
3164 &show_record_btrace_bts_cmdlist
);
3166 add_prefix_cmd ("pt", class_support
, cmd_set_record_btrace_pt
,
3167 _("Set record btrace pt options"),
3168 &set_record_btrace_pt_cmdlist
,
3169 "set record btrace pt ", 0, &set_record_btrace_cmdlist
);
3171 add_prefix_cmd ("pt", class_support
, cmd_show_record_btrace_pt
,
3172 _("Show record btrace pt options"),
3173 &show_record_btrace_pt_cmdlist
,
3174 "show record btrace pt ", 0, &show_record_btrace_cmdlist
);
3176 add_setshow_uinteger_cmd ("buffer-size", no_class
,
3177 &record_btrace_conf
.pt
.size
,
3178 _("Set the record/replay pt buffer size."),
3179 _("Show the record/replay pt buffer size."), _("\
3180 Bigger buffers allow longer recording but also take more time to process \
3181 the recorded execution.\n\
3182 The actual buffer size may differ from the requested size. Use \"info record\" \
3183 to see the actual buffer size."), NULL
, show_record_pt_buffer_size_value
,
3184 &set_record_btrace_pt_cmdlist
,
3185 &show_record_btrace_pt_cmdlist
);
3187 init_record_btrace_ops ();
3188 add_target (&record_btrace_ops
);
3190 bfcache
= htab_create_alloc (50, bfcache_hash
, bfcache_eq
, NULL
,
3193 record_btrace_conf
.bts
.size
= 64 * 1024;
3194 record_btrace_conf
.pt
.size
= 16 * 1024;