/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2021 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */
/* NOTE(review): the original include block lost several lines in extraction
   (the embedded line numbers jump); only the includes still visible are
   restored here.  Verify against upstream gdb/record-btrace.c.  */
#include "record-btrace.h"
#include "gdbthread.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "filenames.h"
#include "frame-unwind.h"
#include "gdbsupport/event-loop.h"
#include "cli/cli-style.h"
#include "async-event.h"
#include <forward_list>
48 static const target_info record_btrace_target_info
= {
50 N_("Branch tracing target"),
51 N_("Collect control-flow trace and provide the execution history.")
54 /* The target_ops of record-btrace. */
56 class record_btrace_target final
: public target_ops
59 const target_info
&info () const override
60 { return record_btrace_target_info
; }
62 strata
stratum () const override
{ return record_stratum
; }
64 void close () override
;
65 void async (int) override
;
67 void detach (inferior
*inf
, int from_tty
) override
68 { record_detach (this, inf
, from_tty
); }
70 void disconnect (const char *, int) override
;
72 void mourn_inferior () override
73 { record_mourn_inferior (this); }
76 { record_kill (this); }
78 enum record_method
record_method (ptid_t ptid
) override
;
80 void stop_recording () override
;
81 void info_record () override
;
83 void insn_history (int size
, gdb_disassembly_flags flags
) override
;
84 void insn_history_from (ULONGEST from
, int size
,
85 gdb_disassembly_flags flags
) override
;
86 void insn_history_range (ULONGEST begin
, ULONGEST end
,
87 gdb_disassembly_flags flags
) override
;
88 void call_history (int size
, record_print_flags flags
) override
;
89 void call_history_from (ULONGEST begin
, int size
, record_print_flags flags
)
91 void call_history_range (ULONGEST begin
, ULONGEST end
, record_print_flags flags
)
94 bool record_is_replaying (ptid_t ptid
) override
;
95 bool record_will_replay (ptid_t ptid
, int dir
) override
;
96 void record_stop_replaying () override
;
98 enum target_xfer_status
xfer_partial (enum target_object object
,
101 const gdb_byte
*writebuf
,
102 ULONGEST offset
, ULONGEST len
,
103 ULONGEST
*xfered_len
) override
;
105 int insert_breakpoint (struct gdbarch
*,
106 struct bp_target_info
*) override
;
107 int remove_breakpoint (struct gdbarch
*, struct bp_target_info
*,
108 enum remove_bp_reason
) override
;
110 void fetch_registers (struct regcache
*, int) override
;
112 void store_registers (struct regcache
*, int) override
;
113 void prepare_to_store (struct regcache
*) override
;
115 const struct frame_unwind
*get_unwinder () override
;
117 const struct frame_unwind
*get_tailcall_unwinder () override
;
119 void resume (ptid_t
, int, enum gdb_signal
) override
;
120 ptid_t
wait (ptid_t
, struct target_waitstatus
*, target_wait_flags
) override
;
122 void stop (ptid_t
) override
;
123 void update_thread_list () override
;
124 bool thread_alive (ptid_t ptid
) override
;
125 void goto_record_begin () override
;
126 void goto_record_end () override
;
127 void goto_record (ULONGEST insn
) override
;
129 bool can_execute_reverse () override
;
131 bool stopped_by_sw_breakpoint () override
;
132 bool supports_stopped_by_sw_breakpoint () override
;
134 bool stopped_by_hw_breakpoint () override
;
135 bool supports_stopped_by_hw_breakpoint () override
;
137 enum exec_direction_kind
execution_direction () override
;
138 void prepare_to_generate_core () override
;
139 void done_generating_core () override
;
142 static record_btrace_target record_btrace_ops
;
144 /* Initialize the record-btrace target ops. */
146 /* Token associated with a new-thread observer enabling branch tracing
147 for the new thread. */
148 static const gdb::observers::token record_btrace_thread_observer_token
{};
150 /* Memory access types used in set/show record btrace replay-memory-access. */
151 static const char replay_memory_access_read_only
[] = "read-only";
152 static const char replay_memory_access_read_write
[] = "read-write";
153 static const char *const replay_memory_access_types
[] =
155 replay_memory_access_read_only
,
156 replay_memory_access_read_write
,
160 /* The currently allowed replay memory access type. */
161 static const char *replay_memory_access
= replay_memory_access_read_only
;
163 /* The cpu state kinds. */
164 enum record_btrace_cpu_state_kind
171 /* The current cpu state. */
172 static enum record_btrace_cpu_state_kind record_btrace_cpu_state
= CS_AUTO
;
174 /* The current cpu for trace decode. */
175 static struct btrace_cpu record_btrace_cpu
;
177 /* Command lists for "set/show record btrace". */
178 static struct cmd_list_element
*set_record_btrace_cmdlist
;
179 static struct cmd_list_element
*show_record_btrace_cmdlist
;
181 /* The execution direction of the last resume we got. See record-full.c. */
182 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
184 /* The async event handler for reverse/replay execution. */
185 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
187 /* A flag indicating that we are currently generating a core file. */
188 static int record_btrace_generating_corefile
;
190 /* The current branch trace configuration. */
191 static struct btrace_config record_btrace_conf
;
193 /* Command list for "record btrace". */
194 static struct cmd_list_element
*record_btrace_cmdlist
;
196 /* Command lists for "set/show record btrace bts". */
197 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
198 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
200 /* Command lists for "set/show record btrace pt". */
201 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
202 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
204 /* Command list for "set record btrace cpu". */
205 static struct cmd_list_element
*set_record_btrace_cpu_cmdlist
;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
220 /* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222 const struct btrace_cpu
*
223 record_btrace_get_cpu (void)
225 switch (record_btrace_cpu_state
)
231 record_btrace_cpu
.vendor
= CV_UNKNOWN
;
234 return &record_btrace_cpu
;
237 error (_("Internal error: bad record btrace cpu state."));
240 /* Update the branch trace for the current thread and return a pointer to its
243 Throws an error if there is no thread or no trace. This function never
246 static struct thread_info
*
247 require_btrace_thread (void)
251 if (inferior_ptid
== null_ptid
)
252 error (_("No thread."));
254 thread_info
*tp
= inferior_thread ();
256 validate_registers_access ();
258 btrace_fetch (tp
, record_btrace_get_cpu ());
260 if (btrace_is_empty (tp
))
261 error (_("No trace."));
266 /* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
269 Throws an error if there is no thread or no trace. This function never
272 static struct btrace_thread_info
*
273 require_btrace (void)
275 struct thread_info
*tp
;
277 tp
= require_btrace_thread ();
282 /* Enable branch tracing for one thread. Warn on errors. */
285 record_btrace_enable_warn (struct thread_info
*tp
)
287 /* Ignore this thread if its inferior is not recorded by us. */
288 target_ops
*rec
= tp
->inf
->target_at (record_stratum
);
289 if (rec
!= &record_btrace_ops
)
294 btrace_enable (tp
, &record_btrace_conf
);
296 catch (const gdb_exception_error
&error
)
298 warning ("%s", error
.what ());
302 /* Enable automatic tracing of new threads. */
305 record_btrace_auto_enable (void)
307 DEBUG ("attach thread observer");
309 gdb::observers::new_thread
.attach (record_btrace_enable_warn
,
310 record_btrace_thread_observer_token
);
313 /* Disable automatic tracing of new threads. */
316 record_btrace_auto_disable (void)
318 DEBUG ("detach thread observer");
320 gdb::observers::new_thread
.detach (record_btrace_thread_observer_token
);
323 /* The record-btrace async event handler function. */
326 record_btrace_handle_async_inferior_event (gdb_client_data data
)
328 inferior_event_handler (INF_REG_EVENT
);
331 /* See record-btrace.h. */
334 record_btrace_push_target (void)
338 record_btrace_auto_enable ();
340 push_target (&record_btrace_ops
);
342 record_btrace_async_inferior_event_handler
343 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
344 NULL
, "record-btrace");
345 record_btrace_generating_corefile
= 0;
347 format
= btrace_format_short_string (record_btrace_conf
.format
);
348 gdb::observers::record_changed
.notify (current_inferior (), 1, "btrace", format
);
351 /* Disable btrace on a set of threads on scope exit. */
353 struct scoped_btrace_disable
355 scoped_btrace_disable () = default;
357 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable
);
359 ~scoped_btrace_disable ()
361 for (thread_info
*tp
: m_threads
)
365 void add_thread (thread_info
*thread
)
367 m_threads
.push_front (thread
);
376 std::forward_list
<thread_info
*> m_threads
;
379 /* Open target record-btrace. */
382 record_btrace_target_open (const char *args
, int from_tty
)
384 /* If we fail to enable btrace for one thread, disable it for the threads for
385 which it was successfully enabled. */
386 scoped_btrace_disable btrace_disable
;
392 if (!target_has_execution ())
393 error (_("The program is not being run."));
395 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
396 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
398 btrace_enable (tp
, &record_btrace_conf
);
400 btrace_disable
.add_thread (tp
);
403 record_btrace_push_target ();
405 btrace_disable
.discard ();
408 /* The stop_recording method of target record-btrace. */
411 record_btrace_target::stop_recording ()
413 DEBUG ("stop recording");
415 record_btrace_auto_disable ();
417 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
418 if (tp
->btrace
.target
!= NULL
)
422 /* The disconnect method of target record-btrace. */
425 record_btrace_target::disconnect (const char *args
,
428 struct target_ops
*beneath
= this->beneath ();
430 /* Do not stop recording, just clean up GDB side. */
431 current_inferior ()->unpush_target (this);
433 /* Forward disconnect. */
434 beneath
->disconnect (args
, from_tty
);
437 /* The close method of target record-btrace. */
440 record_btrace_target::close ()
442 if (record_btrace_async_inferior_event_handler
!= NULL
)
443 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
445 /* Make sure automatic recording gets disabled even if we did not stop
446 recording before closing the record-btrace target. */
447 record_btrace_auto_disable ();
449 /* We should have already stopped recording.
450 Tear down btrace in case we have not. */
451 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
452 btrace_teardown (tp
);
455 /* The async method of target record-btrace. */
458 record_btrace_target::async (int enable
)
461 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
463 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
465 this->beneath ()->async (enable
);
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1 GB, 1 MB, or 1 kB (checked in that
   order via the low bits), *SIZE is scaled down to that unit and the
   matching suffix is returned; otherwise *SIZE is left unchanged and the
   empty string is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
496 /* Print a BTS configuration. */
499 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
507 suffix
= record_btrace_adjust_size (&size
);
508 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
512 /* Print an Intel Processor Trace configuration. */
515 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
523 suffix
= record_btrace_adjust_size (&size
);
524 printf_unfiltered (_("Buffer size: %u%s.\n"), size
, suffix
);
528 /* Print a branch tracing configuration. */
531 record_btrace_print_conf (const struct btrace_config
*conf
)
533 printf_unfiltered (_("Recording format: %s.\n"),
534 btrace_format_string (conf
->format
));
536 switch (conf
->format
)
538 case BTRACE_FORMAT_NONE
:
541 case BTRACE_FORMAT_BTS
:
542 record_btrace_print_bts_conf (&conf
->bts
);
545 case BTRACE_FORMAT_PT
:
546 record_btrace_print_pt_conf (&conf
->pt
);
550 internal_error (__FILE__
, __LINE__
, _("Unknown branch trace format."));
553 /* The info_record method of target record-btrace. */
556 record_btrace_target::info_record ()
558 struct btrace_thread_info
*btinfo
;
559 const struct btrace_config
*conf
;
560 struct thread_info
*tp
;
561 unsigned int insns
, calls
, gaps
;
565 if (inferior_ptid
== null_ptid
)
566 error (_("No thread."));
568 tp
= inferior_thread ();
570 validate_registers_access ();
572 btinfo
= &tp
->btrace
;
574 conf
= ::btrace_conf (btinfo
);
576 record_btrace_print_conf (conf
);
578 btrace_fetch (tp
, record_btrace_get_cpu ());
584 if (!btrace_is_empty (tp
))
586 struct btrace_call_iterator call
;
587 struct btrace_insn_iterator insn
;
589 btrace_call_end (&call
, btinfo
);
590 btrace_call_prev (&call
, 1);
591 calls
= btrace_call_number (&call
);
593 btrace_insn_end (&insn
, btinfo
);
594 insns
= btrace_insn_number (&insn
);
596 /* If the last instruction is not a gap, it is the current instruction
597 that is not actually part of the record. */
598 if (btrace_insn_get (&insn
) != NULL
)
601 gaps
= btinfo
->ngaps
;
604 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
605 "for thread %s (%s).\n"), insns
, calls
, gaps
,
606 print_thread_id (tp
),
607 target_pid_to_str (tp
->ptid
).c_str ());
609 if (btrace_is_replaying (tp
))
610 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
611 btrace_insn_number (btinfo
->replay
));
614 /* Print a decode error. */
617 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
618 enum btrace_format format
)
620 const char *errstr
= btrace_decode_error (format
, errcode
);
622 uiout
->text (_("["));
623 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
624 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
626 uiout
->text (_("decode error ("));
627 uiout
->field_signed ("errcode", errcode
);
628 uiout
->text (_("): "));
630 uiout
->text (errstr
);
631 uiout
->text (_("]\n"));
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
648 /* Construct a line range. */
650 static struct btrace_line_range
651 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
653 struct btrace_line_range range
;
655 range
.symtab
= symtab
;
662 /* Add a line to a line range. */
664 static struct btrace_line_range
665 btrace_line_range_add (struct btrace_line_range range
, int line
)
667 if (range
.end
<= range
.begin
)
669 /* This is the first entry. */
671 range
.end
= line
+ 1;
673 else if (line
< range
.begin
)
675 else if (range
.end
< line
)
681 /* Return non-zero if RANGE is empty, zero otherwise. */
684 btrace_line_range_is_empty (struct btrace_line_range range
)
686 return range
.end
<= range
.begin
;
689 /* Return non-zero if LHS contains RHS, zero otherwise. */
692 btrace_line_range_contains_range (struct btrace_line_range lhs
,
693 struct btrace_line_range rhs
)
695 return ((lhs
.symtab
== rhs
.symtab
)
696 && (lhs
.begin
<= rhs
.begin
)
697 && (rhs
.end
<= lhs
.end
));
700 /* Find the line range associated with PC. */
702 static struct btrace_line_range
703 btrace_find_line_range (CORE_ADDR pc
)
705 struct btrace_line_range range
;
706 struct linetable_entry
*lines
;
707 struct linetable
*ltable
;
708 struct symtab
*symtab
;
711 symtab
= find_pc_line_symtab (pc
);
713 return btrace_mk_line_range (NULL
, 0, 0);
715 ltable
= SYMTAB_LINETABLE (symtab
);
717 return btrace_mk_line_range (symtab
, 0, 0);
719 nlines
= ltable
->nitems
;
720 lines
= ltable
->item
;
722 return btrace_mk_line_range (symtab
, 0, 0);
724 range
= btrace_mk_line_range (symtab
, 0, 0);
725 for (i
= 0; i
< nlines
- 1; i
++)
727 /* The test of is_stmt here was added when the is_stmt field was
728 introduced to the 'struct linetable_entry' structure. This
729 ensured that this loop maintained the same behaviour as before we
730 introduced is_stmt. That said, it might be that we would be
731 better off not checking is_stmt here, this would lead to us
732 possibly adding more line numbers to the range. At the time this
733 change was made I was unsure how to test this so chose to go with
734 maintaining the existing experience. */
735 if ((lines
[i
].pc
== pc
) && (lines
[i
].line
!= 0)
736 && (lines
[i
].is_stmt
== 1))
737 range
= btrace_line_range_add (range
, lines
[i
].line
);
743 /* Print source lines in LINES to UIOUT.
745 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
746 instructions corresponding to that source line. When printing a new source
747 line, we do the cleanups for the open chain and open a new cleanup chain for
748 the new source line. If the source line range in LINES is not empty, this
749 function will leave the cleanup chain for the last printed source line open
750 so instructions can be added to it. */
753 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
754 gdb::optional
<ui_out_emit_tuple
> *src_and_asm_tuple
,
755 gdb::optional
<ui_out_emit_list
> *asm_list
,
756 gdb_disassembly_flags flags
)
758 print_source_lines_flags psl_flags
;
760 if (flags
& DISASSEMBLY_FILENAME
)
761 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
763 for (int line
= lines
.begin
; line
< lines
.end
; ++line
)
767 src_and_asm_tuple
->emplace (uiout
, "src_and_asm_line");
769 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
771 asm_list
->emplace (uiout
, "line_asm_insn");
775 /* Disassemble a section of the recorded instruction trace. */
778 btrace_insn_history (struct ui_out
*uiout
,
779 const struct btrace_thread_info
*btinfo
,
780 const struct btrace_insn_iterator
*begin
,
781 const struct btrace_insn_iterator
*end
,
782 gdb_disassembly_flags flags
)
784 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
785 btrace_insn_number (begin
), btrace_insn_number (end
));
787 flags
|= DISASSEMBLY_SPECULATIVE
;
789 struct gdbarch
*gdbarch
= target_gdbarch ();
790 btrace_line_range last_lines
= btrace_mk_line_range (NULL
, 0, 0);
792 ui_out_emit_list
list_emitter (uiout
, "asm_insns");
794 gdb::optional
<ui_out_emit_tuple
> src_and_asm_tuple
;
795 gdb::optional
<ui_out_emit_list
> asm_list
;
797 gdb_pretty_print_disassembler
disasm (gdbarch
, uiout
);
799 for (btrace_insn_iterator it
= *begin
; btrace_insn_cmp (&it
, end
) != 0;
800 btrace_insn_next (&it
, 1))
802 const struct btrace_insn
*insn
;
804 insn
= btrace_insn_get (&it
);
806 /* A NULL instruction indicates a gap in the trace. */
809 const struct btrace_config
*conf
;
811 conf
= btrace_conf (btinfo
);
813 /* We have trace so we must have a configuration. */
814 gdb_assert (conf
!= NULL
);
816 uiout
->field_fmt ("insn-number", "%u",
817 btrace_insn_number (&it
));
820 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
825 struct disasm_insn dinsn
;
827 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
829 struct btrace_line_range lines
;
831 lines
= btrace_find_line_range (insn
->pc
);
832 if (!btrace_line_range_is_empty (lines
)
833 && !btrace_line_range_contains_range (last_lines
, lines
))
835 btrace_print_lines (lines
, uiout
, &src_and_asm_tuple
, &asm_list
,
839 else if (!src_and_asm_tuple
.has_value ())
841 gdb_assert (!asm_list
.has_value ());
843 src_and_asm_tuple
.emplace (uiout
, "src_and_asm_line");
845 /* No source information. */
846 asm_list
.emplace (uiout
, "line_asm_insn");
849 gdb_assert (src_and_asm_tuple
.has_value ());
850 gdb_assert (asm_list
.has_value ());
853 memset (&dinsn
, 0, sizeof (dinsn
));
854 dinsn
.number
= btrace_insn_number (&it
);
855 dinsn
.addr
= insn
->pc
;
857 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
858 dinsn
.is_speculative
= 1;
860 disasm
.pretty_print_insn (&dinsn
, flags
);
865 /* The insn_history method of target record-btrace. */
868 record_btrace_target::insn_history (int size
, gdb_disassembly_flags flags
)
870 struct btrace_thread_info
*btinfo
;
871 struct btrace_insn_history
*history
;
872 struct btrace_insn_iterator begin
, end
;
873 struct ui_out
*uiout
;
874 unsigned int context
, covered
;
876 uiout
= current_uiout
;
877 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
878 context
= abs (size
);
880 error (_("Bad record instruction-history-size."));
882 btinfo
= require_btrace ();
883 history
= btinfo
->insn_history
;
886 struct btrace_insn_iterator
*replay
;
888 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
890 /* If we're replaying, we start at the replay position. Otherwise, we
891 start at the tail of the trace. */
892 replay
= btinfo
->replay
;
896 btrace_insn_end (&begin
, btinfo
);
898 /* We start from here and expand in the requested direction. Then we
899 expand in the other direction, as well, to fill up any remaining
904 /* We want the current position covered, as well. */
905 covered
= btrace_insn_next (&end
, 1);
906 covered
+= btrace_insn_prev (&begin
, context
- covered
);
907 covered
+= btrace_insn_next (&end
, context
- covered
);
911 covered
= btrace_insn_next (&end
, context
);
912 covered
+= btrace_insn_prev (&begin
, context
- covered
);
917 begin
= history
->begin
;
920 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
921 btrace_insn_number (&begin
), btrace_insn_number (&end
));
926 covered
= btrace_insn_prev (&begin
, context
);
931 covered
= btrace_insn_next (&end
, context
);
936 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
940 printf_unfiltered (_("At the start of the branch trace record.\n"));
942 printf_unfiltered (_("At the end of the branch trace record.\n"));
945 btrace_set_insn_history (btinfo
, &begin
, &end
);
948 /* The insn_history_range method of target record-btrace. */
951 record_btrace_target::insn_history_range (ULONGEST from
, ULONGEST to
,
952 gdb_disassembly_flags flags
)
954 struct btrace_thread_info
*btinfo
;
955 struct btrace_insn_iterator begin
, end
;
956 struct ui_out
*uiout
;
957 unsigned int low
, high
;
960 uiout
= current_uiout
;
961 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
965 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
967 /* Check for wrap-arounds. */
968 if (low
!= from
|| high
!= to
)
969 error (_("Bad range."));
972 error (_("Bad range."));
974 btinfo
= require_btrace ();
976 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
978 error (_("Range out of bounds."));
980 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
983 /* Silently truncate the range. */
984 btrace_insn_end (&end
, btinfo
);
988 /* We want both begin and end to be inclusive. */
989 btrace_insn_next (&end
, 1);
992 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
993 btrace_set_insn_history (btinfo
, &begin
, &end
);
996 /* The insn_history_from method of target record-btrace. */
999 record_btrace_target::insn_history_from (ULONGEST from
, int size
,
1000 gdb_disassembly_flags flags
)
1002 ULONGEST begin
, end
, context
;
1004 context
= abs (size
);
1006 error (_("Bad record instruction-history-size."));
1015 begin
= from
- context
+ 1;
1020 end
= from
+ context
- 1;
1022 /* Check for wrap-around. */
1027 insn_history_range (begin
, end
, flags
);
1030 /* Print the instruction number range for a function call history line. */
1033 btrace_call_history_insn_range (struct ui_out
*uiout
,
1034 const struct btrace_function
*bfun
)
1036 unsigned int begin
, end
, size
;
1038 size
= bfun
->insn
.size ();
1039 gdb_assert (size
> 0);
1041 begin
= bfun
->insn_offset
;
1042 end
= begin
+ size
- 1;
1044 uiout
->field_unsigned ("insn begin", begin
);
1046 uiout
->field_unsigned ("insn end", end
);
1049 /* Compute the lowest and highest source line for the instructions in BFUN
1050 and return them in PBEGIN and PEND.
1051 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1052 result from inlining or macro expansion. */
1055 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
1056 int *pbegin
, int *pend
)
1058 struct symtab
*symtab
;
1069 symtab
= symbol_symtab (sym
);
1071 for (const btrace_insn
&insn
: bfun
->insn
)
1073 struct symtab_and_line sal
;
1075 sal
= find_pc_line (insn
.pc
, 0);
1076 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
1079 begin
= std::min (begin
, sal
.line
);
1080 end
= std::max (end
, sal
.line
);
1088 /* Print the source line information for a function call history line. */
1091 btrace_call_history_src_line (struct ui_out
*uiout
,
1092 const struct btrace_function
*bfun
)
1101 uiout
->field_string ("file",
1102 symtab_to_filename_for_display (symbol_symtab (sym
)),
1103 file_name_style
.style ());
1105 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1110 uiout
->field_signed ("min line", begin
);
1116 uiout
->field_signed ("max line", end
);
1119 /* Get the name of a branch trace function. */
1122 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1124 struct minimal_symbol
*msym
;
1134 return sym
->print_name ();
1135 else if (msym
!= NULL
)
1136 return msym
->print_name ();
1141 /* Disassemble a section of the recorded function trace. */
1144 btrace_call_history (struct ui_out
*uiout
,
1145 const struct btrace_thread_info
*btinfo
,
1146 const struct btrace_call_iterator
*begin
,
1147 const struct btrace_call_iterator
*end
,
1150 struct btrace_call_iterator it
;
1151 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1153 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1154 btrace_call_number (end
));
1156 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1158 const struct btrace_function
*bfun
;
1159 struct minimal_symbol
*msym
;
1162 bfun
= btrace_call_get (&it
);
1166 /* Print the function index. */
1167 uiout
->field_unsigned ("index", bfun
->number
);
1170 /* Indicate gaps in the trace. */
1171 if (bfun
->errcode
!= 0)
1173 const struct btrace_config
*conf
;
1175 conf
= btrace_conf (btinfo
);
1177 /* We have trace so we must have a configuration. */
1178 gdb_assert (conf
!= NULL
);
1180 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1185 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1187 int level
= bfun
->level
+ btinfo
->level
, i
;
1189 for (i
= 0; i
< level
; ++i
)
1194 uiout
->field_string ("function", sym
->print_name (),
1195 function_name_style
.style ());
1196 else if (msym
!= NULL
)
1197 uiout
->field_string ("function", msym
->print_name (),
1198 function_name_style
.style ());
1199 else if (!uiout
->is_mi_like_p ())
1200 uiout
->field_string ("function", "??",
1201 function_name_style
.style ());
1203 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1205 uiout
->text (_("\tinst "));
1206 btrace_call_history_insn_range (uiout
, bfun
);
1209 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1211 uiout
->text (_("\tat "));
1212 btrace_call_history_src_line (uiout
, bfun
);
1219 /* The call_history method of target record-btrace. */
1222 record_btrace_target::call_history (int size
, record_print_flags flags
)
1224 struct btrace_thread_info
*btinfo
;
1225 struct btrace_call_history
*history
;
1226 struct btrace_call_iterator begin
, end
;
1227 struct ui_out
*uiout
;
1228 unsigned int context
, covered
;
1230 uiout
= current_uiout
;
1231 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1232 context
= abs (size
);
1234 error (_("Bad record function-call-history-size."));
1236 btinfo
= require_btrace ();
1237 history
= btinfo
->call_history
;
1238 if (history
== NULL
)
1240 struct btrace_insn_iterator
*replay
;
1242 DEBUG ("call-history (0x%x): %d", (int) flags
, size
);
1244 /* If we're replaying, we start at the replay position. Otherwise, we
1245 start at the tail of the trace. */
1246 replay
= btinfo
->replay
;
1249 begin
.btinfo
= btinfo
;
1250 begin
.index
= replay
->call_index
;
1253 btrace_call_end (&begin
, btinfo
);
1255 /* We start from here and expand in the requested direction. Then we
1256 expand in the other direction, as well, to fill up any remaining
1261 /* We want the current position covered, as well. */
1262 covered
= btrace_call_next (&end
, 1);
1263 covered
+= btrace_call_prev (&begin
, context
- covered
);
1264 covered
+= btrace_call_next (&end
, context
- covered
);
1268 covered
= btrace_call_next (&end
, context
);
1269 covered
+= btrace_call_prev (&begin
, context
- covered
);
1274 begin
= history
->begin
;
1277 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags
, size
,
1278 btrace_call_number (&begin
), btrace_call_number (&end
));
1283 covered
= btrace_call_prev (&begin
, context
);
1288 covered
= btrace_call_next (&end
, context
);
1293 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1297 printf_unfiltered (_("At the start of the branch trace record.\n"));
1299 printf_unfiltered (_("At the end of the branch trace record.\n"));
1302 btrace_set_call_history (btinfo
, &begin
, &end
);
1305 /* The call_history_range method of target record-btrace. */
1308 record_btrace_target::call_history_range (ULONGEST from
, ULONGEST to
,
1309 record_print_flags flags
)
1311 struct btrace_thread_info
*btinfo
;
1312 struct btrace_call_iterator begin
, end
;
1313 struct ui_out
*uiout
;
1314 unsigned int low
, high
;
1317 uiout
= current_uiout
;
1318 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1322 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags
, low
, high
);
1324 /* Check for wrap-arounds. */
1325 if (low
!= from
|| high
!= to
)
1326 error (_("Bad range."));
1329 error (_("Bad range."));
1331 btinfo
= require_btrace ();
1333 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1335 error (_("Range out of bounds."));
1337 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1340 /* Silently truncate the range. */
1341 btrace_call_end (&end
, btinfo
);
1345 /* We want both begin and end to be inclusive. */
1346 btrace_call_next (&end
, 1);
1349 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1350 btrace_set_call_history (btinfo
, &begin
, &end
);
1353 /* The call_history_from method of target record-btrace. */
1356 record_btrace_target::call_history_from (ULONGEST from
, int size
,
1357 record_print_flags flags
)
1359 ULONGEST begin
, end
, context
;
1361 context
= abs (size
);
1363 error (_("Bad record function-call-history-size."));
1372 begin
= from
- context
+ 1;
1377 end
= from
+ context
- 1;
1379 /* Check for wrap-around. */
1384 call_history_range ( begin
, end
, flags
);
1387 /* The record_method method of target record-btrace. */
1390 record_btrace_target::record_method (ptid_t ptid
)
1392 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1393 thread_info
*const tp
= find_thread_ptid (proc_target
, ptid
);
1396 error (_("No thread."));
1398 if (tp
->btrace
.target
== NULL
)
1399 return RECORD_METHOD_NONE
;
1401 return RECORD_METHOD_BTRACE
;
1404 /* The record_is_replaying method of target record-btrace. */
1407 record_btrace_target::record_is_replaying (ptid_t ptid
)
1409 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1410 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
1411 if (btrace_is_replaying (tp
))
1417 /* The record_will_replay method of target record-btrace. */
1420 record_btrace_target::record_will_replay (ptid_t ptid
, int dir
)
1422 return dir
== EXEC_REVERSE
|| record_is_replaying (ptid
);
1425 /* The xfer_partial method of target record-btrace. */
1427 enum target_xfer_status
1428 record_btrace_target::xfer_partial (enum target_object object
,
1429 const char *annex
, gdb_byte
*readbuf
,
1430 const gdb_byte
*writebuf
, ULONGEST offset
,
1431 ULONGEST len
, ULONGEST
*xfered_len
)
1433 /* Filter out requests that don't make sense during replay. */
1434 if (replay_memory_access
== replay_memory_access_read_only
1435 && !record_btrace_generating_corefile
1436 && record_is_replaying (inferior_ptid
))
1440 case TARGET_OBJECT_MEMORY
:
1442 const struct target_section
*section
;
1444 /* We do not allow writing memory in general. */
1445 if (writebuf
!= NULL
)
1448 return TARGET_XFER_UNAVAILABLE
;
1451 /* We allow reading readonly memory. */
1452 section
= target_section_by_addr (this, offset
);
1453 if (section
!= NULL
)
1455 /* Check if the section we found is readonly. */
1456 if ((bfd_section_flags (section
->the_bfd_section
)
1457 & SEC_READONLY
) != 0)
1459 /* Truncate the request to fit into this section. */
1460 len
= std::min (len
, section
->endaddr
- offset
);
1466 return TARGET_XFER_UNAVAILABLE
;
1471 /* Forward the request. */
1472 return this->beneath ()->xfer_partial (object
, annex
, readbuf
, writebuf
,
1473 offset
, len
, xfered_len
);
1476 /* The insert_breakpoint method of target record-btrace. */
1479 record_btrace_target::insert_breakpoint (struct gdbarch
*gdbarch
,
1480 struct bp_target_info
*bp_tgt
)
1485 /* Inserting breakpoints requires accessing memory. Allow it for the
1486 duration of this function. */
1487 old
= replay_memory_access
;
1488 replay_memory_access
= replay_memory_access_read_write
;
1493 ret
= this->beneath ()->insert_breakpoint (gdbarch
, bp_tgt
);
1495 catch (const gdb_exception
&except
)
1497 replay_memory_access
= old
;
1500 replay_memory_access
= old
;
1505 /* The remove_breakpoint method of target record-btrace. */
1508 record_btrace_target::remove_breakpoint (struct gdbarch
*gdbarch
,
1509 struct bp_target_info
*bp_tgt
,
1510 enum remove_bp_reason reason
)
1515 /* Removing breakpoints requires accessing memory. Allow it for the
1516 duration of this function. */
1517 old
= replay_memory_access
;
1518 replay_memory_access
= replay_memory_access_read_write
;
1523 ret
= this->beneath ()->remove_breakpoint (gdbarch
, bp_tgt
, reason
);
1525 catch (const gdb_exception
&except
)
1527 replay_memory_access
= old
;
1530 replay_memory_access
= old
;
1535 /* The fetch_registers method of target record-btrace. */
1538 record_btrace_target::fetch_registers (struct regcache
*regcache
, int regno
)
1540 btrace_insn_iterator
*replay
= nullptr;
1542 /* Thread-db may ask for a thread's registers before GDB knows about the
1543 thread. We forward the request to the target beneath in this
1545 thread_info
*tp
= find_thread_ptid (regcache
->target (), regcache
->ptid ());
1547 replay
= tp
->btrace
.replay
;
1549 if (replay
!= nullptr && !record_btrace_generating_corefile
)
1551 const struct btrace_insn
*insn
;
1552 struct gdbarch
*gdbarch
;
1555 gdbarch
= regcache
->arch ();
1556 pcreg
= gdbarch_pc_regnum (gdbarch
);
1560 /* We can only provide the PC register. */
1561 if (regno
>= 0 && regno
!= pcreg
)
1564 insn
= btrace_insn_get (replay
);
1565 gdb_assert (insn
!= NULL
);
1567 regcache
->raw_supply (regno
, &insn
->pc
);
1570 this->beneath ()->fetch_registers (regcache
, regno
);
1573 /* The store_registers method of target record-btrace. */
1576 record_btrace_target::store_registers (struct regcache
*regcache
, int regno
)
1578 if (!record_btrace_generating_corefile
1579 && record_is_replaying (regcache
->ptid ()))
1580 error (_("Cannot write registers while replaying."));
1582 gdb_assert (may_write_registers
);
1584 this->beneath ()->store_registers (regcache
, regno
);
1587 /* The prepare_to_store method of target record-btrace. */
1590 record_btrace_target::prepare_to_store (struct regcache
*regcache
)
1592 if (!record_btrace_generating_corefile
1593 && record_is_replaying (regcache
->ptid ()))
1596 this->beneath ()->prepare_to_store (regcache
);
1599 /* The branch trace frame cache. */
1601 struct btrace_frame_cache
1604 struct thread_info
*tp
;
1606 /* The frame info. */
1607 struct frame_info
*frame
;
1609 /* The branch trace function segment. */
1610 const struct btrace_function
*bfun
;
1613 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1615 static htab_t bfcache
;
1617 /* hash_f for htab_create_alloc of bfcache. */
1620 bfcache_hash (const void *arg
)
1622 const struct btrace_frame_cache
*cache
1623 = (const struct btrace_frame_cache
*) arg
;
1625 return htab_hash_pointer (cache
->frame
);
1628 /* eq_f for htab_create_alloc of bfcache. */
1631 bfcache_eq (const void *arg1
, const void *arg2
)
1633 const struct btrace_frame_cache
*cache1
1634 = (const struct btrace_frame_cache
*) arg1
;
1635 const struct btrace_frame_cache
*cache2
1636 = (const struct btrace_frame_cache
*) arg2
;
1638 return cache1
->frame
== cache2
->frame
;
1641 /* Create a new btrace frame cache. */
1643 static struct btrace_frame_cache
*
1644 bfcache_new (struct frame_info
*frame
)
1646 struct btrace_frame_cache
*cache
;
1649 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1650 cache
->frame
= frame
;
1652 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1653 gdb_assert (*slot
== NULL
);
1659 /* Extract the branch trace function from a branch trace frame. */
1661 static const struct btrace_function
*
1662 btrace_get_frame_function (struct frame_info
*frame
)
1664 const struct btrace_frame_cache
*cache
;
1665 struct btrace_frame_cache pattern
;
1668 pattern
.frame
= frame
;
1670 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1674 cache
= (const struct btrace_frame_cache
*) *slot
;
1678 /* Implement stop_reason method for record_btrace_frame_unwind. */
1680 static enum unwind_stop_reason
1681 record_btrace_frame_unwind_stop_reason (struct frame_info
*this_frame
,
1684 const struct btrace_frame_cache
*cache
;
1685 const struct btrace_function
*bfun
;
1687 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1689 gdb_assert (bfun
!= NULL
);
1692 return UNWIND_UNAVAILABLE
;
1694 return UNWIND_NO_REASON
;
1697 /* Implement this_id method for record_btrace_frame_unwind. */
1700 record_btrace_frame_this_id (struct frame_info
*this_frame
, void **this_cache
,
1701 struct frame_id
*this_id
)
1703 const struct btrace_frame_cache
*cache
;
1704 const struct btrace_function
*bfun
;
1705 struct btrace_call_iterator it
;
1706 CORE_ADDR code
, special
;
1708 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1711 gdb_assert (bfun
!= NULL
);
1713 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1714 bfun
= btrace_call_get (&it
);
1716 code
= get_frame_func (this_frame
);
1717 special
= bfun
->number
;
1719 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1721 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1722 btrace_get_bfun_name (cache
->bfun
),
1723 core_addr_to_string_nz (this_id
->code_addr
),
1724 core_addr_to_string_nz (this_id
->special_addr
));
1727 /* Implement prev_register method for record_btrace_frame_unwind. */
1729 static struct value
*
1730 record_btrace_frame_prev_register (struct frame_info
*this_frame
,
1734 const struct btrace_frame_cache
*cache
;
1735 const struct btrace_function
*bfun
, *caller
;
1736 struct btrace_call_iterator it
;
1737 struct gdbarch
*gdbarch
;
1741 gdbarch
= get_frame_arch (this_frame
);
1742 pcreg
= gdbarch_pc_regnum (gdbarch
);
1743 if (pcreg
< 0 || regnum
!= pcreg
)
1744 throw_error (NOT_AVAILABLE_ERROR
,
1745 _("Registers are not available in btrace record history"));
1747 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1749 gdb_assert (bfun
!= NULL
);
1751 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1752 throw_error (NOT_AVAILABLE_ERROR
,
1753 _("No caller in btrace record history"));
1755 caller
= btrace_call_get (&it
);
1757 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1758 pc
= caller
->insn
.front ().pc
;
1761 pc
= caller
->insn
.back ().pc
;
1762 pc
+= gdb_insn_length (gdbarch
, pc
);
1765 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1766 btrace_get_bfun_name (bfun
), bfun
->level
,
1767 core_addr_to_string_nz (pc
));
1769 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1772 /* Implement sniffer method for record_btrace_frame_unwind. */
1775 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1776 struct frame_info
*this_frame
,
1779 const struct btrace_function
*bfun
;
1780 struct btrace_frame_cache
*cache
;
1781 struct thread_info
*tp
;
1782 struct frame_info
*next
;
1784 /* THIS_FRAME does not contain a reference to its thread. */
1785 tp
= inferior_thread ();
1788 next
= get_next_frame (this_frame
);
1791 const struct btrace_insn_iterator
*replay
;
1793 replay
= tp
->btrace
.replay
;
1795 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1799 const struct btrace_function
*callee
;
1800 struct btrace_call_iterator it
;
1802 callee
= btrace_get_frame_function (next
);
1803 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1806 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1809 bfun
= btrace_call_get (&it
);
1815 DEBUG ("[frame] sniffed frame for %s on level %d",
1816 btrace_get_bfun_name (bfun
), bfun
->level
);
1818 /* This is our frame. Initialize the frame cache. */
1819 cache
= bfcache_new (this_frame
);
1823 *this_cache
= cache
;
1827 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1830 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1831 struct frame_info
*this_frame
,
1834 const struct btrace_function
*bfun
, *callee
;
1835 struct btrace_frame_cache
*cache
;
1836 struct btrace_call_iterator it
;
1837 struct frame_info
*next
;
1838 struct thread_info
*tinfo
;
1840 next
= get_next_frame (this_frame
);
1844 callee
= btrace_get_frame_function (next
);
1848 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1851 tinfo
= inferior_thread ();
1852 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1855 bfun
= btrace_call_get (&it
);
1857 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1858 btrace_get_bfun_name (bfun
), bfun
->level
);
1860 /* This is our frame. Initialize the frame cache. */
1861 cache
= bfcache_new (this_frame
);
1865 *this_cache
= cache
;
1870 record_btrace_frame_dealloc_cache (struct frame_info
*self
, void *this_cache
)
1872 struct btrace_frame_cache
*cache
;
1875 cache
= (struct btrace_frame_cache
*) this_cache
;
1877 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1878 gdb_assert (slot
!= NULL
);
1880 htab_remove_elt (bfcache
, cache
);
1883 /* btrace recording does not store previous memory content, neither the stack
1884 frames content. Any unwinding would return erroneous results as the stack
1885 contents no longer matches the changed PC value restored from history.
1886 Therefore this unwinder reports any possibly unwound registers as
1889 const struct frame_unwind record_btrace_frame_unwind
=
1892 record_btrace_frame_unwind_stop_reason
,
1893 record_btrace_frame_this_id
,
1894 record_btrace_frame_prev_register
,
1896 record_btrace_frame_sniffer
,
1897 record_btrace_frame_dealloc_cache
1900 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1903 record_btrace_frame_unwind_stop_reason
,
1904 record_btrace_frame_this_id
,
1905 record_btrace_frame_prev_register
,
1907 record_btrace_tailcall_frame_sniffer
,
1908 record_btrace_frame_dealloc_cache
1911 /* Implement the get_unwinder method. */
1913 const struct frame_unwind
*
1914 record_btrace_target::get_unwinder ()
1916 return &record_btrace_frame_unwind
;
1919 /* Implement the get_tailcall_unwinder method. */
1921 const struct frame_unwind
*
1922 record_btrace_target::get_tailcall_unwinder ()
1924 return &record_btrace_tailcall_frame_unwind
;
1927 /* Return a human-readable string for FLAG. */
1930 btrace_thread_flag_to_str (btrace_thread_flags flag
)
1938 return "reverse-step";
1944 return "reverse-cont";
1953 /* Indicate that TP should be resumed according to FLAG. */
1956 record_btrace_resume_thread (struct thread_info
*tp
,
1957 enum btrace_thread_flag flag
)
1959 struct btrace_thread_info
*btinfo
;
1961 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1962 target_pid_to_str (tp
->ptid
).c_str (), flag
,
1963 btrace_thread_flag_to_str (flag
));
1965 btinfo
= &tp
->btrace
;
1967 /* Fetch the latest branch trace. */
1968 btrace_fetch (tp
, record_btrace_get_cpu ());
1970 /* A resume request overwrites a preceding resume or stop request. */
1971 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1972 btinfo
->flags
|= flag
;
1975 /* Get the current frame for TP. */
1977 static struct frame_id
1978 get_thread_current_frame_id (struct thread_info
*tp
)
1983 /* Set current thread, which is implicitly used by
1984 get_current_frame. */
1985 scoped_restore_current_thread restore_thread
;
1987 switch_to_thread (tp
);
1989 process_stratum_target
*proc_target
= tp
->inf
->process_target ();
1991 /* Clear the executing flag to allow changes to the current frame.
1992 We are not actually running, yet. We just started a reverse execution
1993 command or a record goto command.
1994 For the latter, EXECUTING is false and this has no effect.
1995 For the former, EXECUTING is true and we're in wait, about to
1996 move the thread. Since we need to recompute the stack, we temporarily
1997 set EXECUTING to false. */
1998 executing
= tp
->executing
;
1999 set_executing (proc_target
, inferior_ptid
, false);
2004 id
= get_frame_id (get_current_frame ());
2006 catch (const gdb_exception
&except
)
2008 /* Restore the previous execution state. */
2009 set_executing (proc_target
, inferior_ptid
, executing
);
2014 /* Restore the previous execution state. */
2015 set_executing (proc_target
, inferior_ptid
, executing
);
2020 /* Start replaying a thread. */
2022 static struct btrace_insn_iterator
*
2023 record_btrace_start_replaying (struct thread_info
*tp
)
2025 struct btrace_insn_iterator
*replay
;
2026 struct btrace_thread_info
*btinfo
;
2028 btinfo
= &tp
->btrace
;
2031 /* We can't start replaying without trace. */
2032 if (btinfo
->functions
.empty ())
2035 /* GDB stores the current frame_id when stepping in order to detects steps
2037 Since frames are computed differently when we're replaying, we need to
2038 recompute those stored frames and fix them up so we can still detect
2039 subroutines after we started replaying. */
2042 struct frame_id frame_id
;
2043 int upd_step_frame_id
, upd_step_stack_frame_id
;
2045 /* The current frame without replaying - computed via normal unwind. */
2046 frame_id
= get_thread_current_frame_id (tp
);
2048 /* Check if we need to update any stepping-related frame id's. */
2049 upd_step_frame_id
= frame_id_eq (frame_id
,
2050 tp
->control
.step_frame_id
);
2051 upd_step_stack_frame_id
= frame_id_eq (frame_id
,
2052 tp
->control
.step_stack_frame_id
);
2054 /* We start replaying at the end of the branch trace. This corresponds
2055 to the current instruction. */
2056 replay
= XNEW (struct btrace_insn_iterator
);
2057 btrace_insn_end (replay
, btinfo
);
2059 /* Skip gaps at the end of the trace. */
2060 while (btrace_insn_get (replay
) == NULL
)
2064 steps
= btrace_insn_prev (replay
, 1);
2066 error (_("No trace."));
2069 /* We're not replaying, yet. */
2070 gdb_assert (btinfo
->replay
== NULL
);
2071 btinfo
->replay
= replay
;
2073 /* Make sure we're not using any stale registers. */
2074 registers_changed_thread (tp
);
2076 /* The current frame with replaying - computed via btrace unwind. */
2077 frame_id
= get_thread_current_frame_id (tp
);
2079 /* Replace stepping related frames where necessary. */
2080 if (upd_step_frame_id
)
2081 tp
->control
.step_frame_id
= frame_id
;
2082 if (upd_step_stack_frame_id
)
2083 tp
->control
.step_stack_frame_id
= frame_id
;
2085 catch (const gdb_exception
&except
)
2087 xfree (btinfo
->replay
);
2088 btinfo
->replay
= NULL
;
2090 registers_changed_thread (tp
);
2098 /* Stop replaying a thread. */
2101 record_btrace_stop_replaying (struct thread_info
*tp
)
2103 struct btrace_thread_info
*btinfo
;
2105 btinfo
= &tp
->btrace
;
2107 xfree (btinfo
->replay
);
2108 btinfo
->replay
= NULL
;
2110 /* Make sure we're not leaving any stale registers. */
2111 registers_changed_thread (tp
);
2114 /* Stop replaying TP if it is at the end of its execution history. */
2117 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2119 struct btrace_insn_iterator
*replay
, end
;
2120 struct btrace_thread_info
*btinfo
;
2122 btinfo
= &tp
->btrace
;
2123 replay
= btinfo
->replay
;
2128 btrace_insn_end (&end
, btinfo
);
2130 if (btrace_insn_cmp (replay
, &end
) == 0)
2131 record_btrace_stop_replaying (tp
);
2134 /* The resume method of target record-btrace. */
2137 record_btrace_target::resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2139 enum btrace_thread_flag flag
, cflag
;
2141 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid
).c_str (),
2142 ::execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2143 step
? "step" : "cont");
2145 /* Store the execution direction of the last resume.
2147 If there is more than one resume call, we have to rely on infrun
2148 to not change the execution direction in-between. */
2149 record_btrace_resume_exec_dir
= ::execution_direction
;
2151 /* As long as we're not replaying, just forward the request.
2153 For non-stop targets this means that no thread is replaying. In order to
2154 make progress, we may need to explicitly move replaying threads to the end
2155 of their execution history. */
2156 if ((::execution_direction
!= EXEC_REVERSE
)
2157 && !record_is_replaying (minus_one_ptid
))
2159 this->beneath ()->resume (ptid
, step
, signal
);
2163 /* Compute the btrace thread flag for the requested move. */
2164 if (::execution_direction
== EXEC_REVERSE
)
2166 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2171 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2175 /* We just indicate the resume intent here. The actual stepping happens in
2176 record_btrace_wait below.
2178 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2180 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2182 if (!target_is_non_stop_p ())
2184 gdb_assert (inferior_ptid
.matches (ptid
));
2186 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2188 if (tp
->ptid
.matches (inferior_ptid
))
2189 record_btrace_resume_thread (tp
, flag
);
2191 record_btrace_resume_thread (tp
, cflag
);
2196 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2197 record_btrace_resume_thread (tp
, flag
);
2200 /* Async support. */
2201 if (target_can_async_p ())
2204 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2208 /* Cancel resuming TP. */
2211 record_btrace_cancel_resume (struct thread_info
*tp
)
2213 btrace_thread_flags flags
;
2215 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2219 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2220 print_thread_id (tp
),
2221 target_pid_to_str (tp
->ptid
).c_str (), flags
.raw (),
2222 btrace_thread_flag_to_str (flags
));
2224 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2225 record_btrace_stop_replaying_at_end (tp
);
2228 /* Return a target_waitstatus indicating that we ran out of history. */
2230 static struct target_waitstatus
2231 btrace_step_no_history (void)
2233 struct target_waitstatus status
;
2235 status
.kind
= TARGET_WAITKIND_NO_HISTORY
;
2240 /* Return a target_waitstatus indicating that a step finished. */
2242 static struct target_waitstatus
2243 btrace_step_stopped (void)
2245 struct target_waitstatus status
;
2247 status
.kind
= TARGET_WAITKIND_STOPPED
;
2248 status
.value
.sig
= GDB_SIGNAL_TRAP
;
2253 /* Return a target_waitstatus indicating that a thread was stopped as
2256 static struct target_waitstatus
2257 btrace_step_stopped_on_request (void)
2259 struct target_waitstatus status
;
2261 status
.kind
= TARGET_WAITKIND_STOPPED
;
2262 status
.value
.sig
= GDB_SIGNAL_0
;
2267 /* Return a target_waitstatus indicating a spurious stop. */
2269 static struct target_waitstatus
2270 btrace_step_spurious (void)
2272 struct target_waitstatus status
;
2274 status
.kind
= TARGET_WAITKIND_SPURIOUS
;
2279 /* Return a target_waitstatus indicating that the thread was not resumed. */
2281 static struct target_waitstatus
2282 btrace_step_no_resumed (void)
2284 struct target_waitstatus status
;
2286 status
.kind
= TARGET_WAITKIND_NO_RESUMED
;
2291 /* Return a target_waitstatus indicating that we should wait again. */
2293 static struct target_waitstatus
2294 btrace_step_again (void)
2296 struct target_waitstatus status
;
2298 status
.kind
= TARGET_WAITKIND_IGNORE
;
2303 /* Clear the record histories. */
2306 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2308 xfree (btinfo
->insn_history
);
2309 xfree (btinfo
->call_history
);
2311 btinfo
->insn_history
= NULL
;
2312 btinfo
->call_history
= NULL
;
2315 /* Check whether TP's current replay position is at a breakpoint. */
2318 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2320 struct btrace_insn_iterator
*replay
;
2321 struct btrace_thread_info
*btinfo
;
2322 const struct btrace_insn
*insn
;
2324 btinfo
= &tp
->btrace
;
2325 replay
= btinfo
->replay
;
2330 insn
= btrace_insn_get (replay
);
2334 return record_check_stopped_by_breakpoint (tp
->inf
->aspace
, insn
->pc
,
2335 &btinfo
->stop_reason
);
2338 /* Step one instruction in forward direction. */
2340 static struct target_waitstatus
2341 record_btrace_single_step_forward (struct thread_info
*tp
)
2343 struct btrace_insn_iterator
*replay
, end
, start
;
2344 struct btrace_thread_info
*btinfo
;
2346 btinfo
= &tp
->btrace
;
2347 replay
= btinfo
->replay
;
2349 /* We're done if we're not replaying. */
2351 return btrace_step_no_history ();
2353 /* Check if we're stepping a breakpoint. */
2354 if (record_btrace_replay_at_breakpoint (tp
))
2355 return btrace_step_stopped ();
2357 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2358 jump back to the instruction at which we started. */
2364 /* We will bail out here if we continue stepping after reaching the end
2365 of the execution history. */
2366 steps
= btrace_insn_next (replay
, 1);
2370 return btrace_step_no_history ();
2373 while (btrace_insn_get (replay
) == NULL
);
2375 /* Determine the end of the instruction trace. */
2376 btrace_insn_end (&end
, btinfo
);
2378 /* The execution trace contains (and ends with) the current instruction.
2379 This instruction has not been executed, yet, so the trace really ends
2380 one instruction earlier. */
2381 if (btrace_insn_cmp (replay
, &end
) == 0)
2382 return btrace_step_no_history ();
2384 return btrace_step_spurious ();
2387 /* Step one instruction in backward direction. */
2389 static struct target_waitstatus
2390 record_btrace_single_step_backward (struct thread_info
*tp
)
2392 struct btrace_insn_iterator
*replay
, start
;
2393 struct btrace_thread_info
*btinfo
;
2395 btinfo
= &tp
->btrace
;
2396 replay
= btinfo
->replay
;
2398 /* Start replaying if we're not already doing so. */
2400 replay
= record_btrace_start_replaying (tp
);
2402 /* If we can't step any further, we reached the end of the history.
2403 Skip gaps during replay. If we end up at a gap (at the beginning of
2404 the trace), jump back to the instruction at which we started. */
2410 steps
= btrace_insn_prev (replay
, 1);
2414 return btrace_step_no_history ();
2417 while (btrace_insn_get (replay
) == NULL
);
2419 /* Check if we're stepping a breakpoint.
2421 For reverse-stepping, this check is after the step. There is logic in
2422 infrun.c that handles reverse-stepping separately. See, for example,
2423 proceed and adjust_pc_after_break.
2425 This code assumes that for reverse-stepping, PC points to the last
2426 de-executed instruction, whereas for forward-stepping PC points to the
2427 next to-be-executed instruction. */
2428 if (record_btrace_replay_at_breakpoint (tp
))
2429 return btrace_step_stopped ();
2431 return btrace_step_spurious ();
2434 /* Step a single thread. */
2436 static struct target_waitstatus
2437 record_btrace_step_thread (struct thread_info
*tp
)
2439 struct btrace_thread_info
*btinfo
;
2440 struct target_waitstatus status
;
2441 btrace_thread_flags flags
;
2443 btinfo
= &tp
->btrace
;
2445 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2446 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2448 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2449 target_pid_to_str (tp
->ptid
).c_str (), flags
.raw (),
2450 btrace_thread_flag_to_str (flags
));
2452 /* We can't step without an execution history. */
2453 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2454 return btrace_step_no_history ();
2459 internal_error (__FILE__
, __LINE__
, _("invalid stepping type."));
2462 return btrace_step_stopped_on_request ();
2465 status
= record_btrace_single_step_forward (tp
);
2466 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2469 return btrace_step_stopped ();
2472 status
= record_btrace_single_step_backward (tp
);
2473 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2476 return btrace_step_stopped ();
2479 status
= record_btrace_single_step_forward (tp
);
2480 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2483 btinfo
->flags
|= flags
;
2484 return btrace_step_again ();
2487 status
= record_btrace_single_step_backward (tp
);
2488 if (status
.kind
!= TARGET_WAITKIND_SPURIOUS
)
2491 btinfo
->flags
|= flags
;
2492 return btrace_step_again ();
2495 /* We keep threads moving at the end of their execution history. The wait
2496 method will stop the thread for whom the event is reported. */
2497 if (status
.kind
== TARGET_WAITKIND_NO_HISTORY
)
2498 btinfo
->flags
|= flags
;
2503 /* Announce further events if necessary. */
2506 record_btrace_maybe_mark_async_event
2507 (const std::vector
<thread_info
*> &moving
,
2508 const std::vector
<thread_info
*> &no_history
)
2510 bool more_moving
= !moving
.empty ();
2511 bool more_no_history
= !no_history
.empty ();;
2513 if (!more_moving
&& !more_no_history
)
2517 DEBUG ("movers pending");
2519 if (more_no_history
)
2520 DEBUG ("no-history pending");
2522 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2525 /* The wait method of target record-btrace. */
2528 record_btrace_target::wait (ptid_t ptid
, struct target_waitstatus
*status
,
2529 target_wait_flags options
)
2531 std::vector
<thread_info
*> moving
;
2532 std::vector
<thread_info
*> no_history
;
2534 /* Clear this, if needed we'll re-mark it below. */
2535 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
2537 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid
).c_str (),
2538 (unsigned) options
);
2540 /* As long as we're not replaying, just forward the request. */
2541 if ((::execution_direction
!= EXEC_REVERSE
)
2542 && !record_is_replaying (minus_one_ptid
))
2544 return this->beneath ()->wait (ptid
, status
, options
);
2547 /* Keep a work list of moving threads. */
2548 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2549 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2550 if ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0)
2551 moving
.push_back (tp
);
2553 if (moving
.empty ())
2555 *status
= btrace_step_no_resumed ();
2557 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid
).c_str (),
2558 target_waitstatus_to_string (status
).c_str ());
2563 /* Step moving threads one by one, one step each, until either one thread
2564 reports an event or we run out of threads to step.
2566 When stepping more than one thread, chances are that some threads reach
2567 the end of their execution history earlier than others. If we reported
2568 this immediately, all-stop on top of non-stop would stop all threads and
2569 resume the same threads next time. And we would report the same thread
2570 having reached the end of its execution history again.
2572 In the worst case, this would starve the other threads. But even if other
2573 threads would be allowed to make progress, this would result in far too
2574 many intermediate stops.
2576 We therefore delay the reporting of "no execution history" until we have
2577 nothing else to report. By this time, all threads should have moved to
2578 either the beginning or the end of their execution history. There will
2579 be a single user-visible stop. */
2580 struct thread_info
*eventing
= NULL
;
2581 while ((eventing
== NULL
) && !moving
.empty ())
2583 for (unsigned int ix
= 0; eventing
== NULL
&& ix
< moving
.size ();)
2585 thread_info
*tp
= moving
[ix
];
2587 *status
= record_btrace_step_thread (tp
);
2589 switch (status
->kind
)
2591 case TARGET_WAITKIND_IGNORE
:
2595 case TARGET_WAITKIND_NO_HISTORY
:
2596 no_history
.push_back (ordered_remove (moving
, ix
));
2600 eventing
= unordered_remove (moving
, ix
);
2606 if (eventing
== NULL
)
2608 /* We started with at least one moving thread. This thread must have
2609 either stopped or reached the end of its execution history.
2611 In the former case, EVENTING must not be NULL.
2612 In the latter case, NO_HISTORY must not be empty. */
2613 gdb_assert (!no_history
.empty ());
2615 /* We kept threads moving at the end of their execution history. Stop
2616 EVENTING now that we are going to report its stop. */
2617 eventing
= unordered_remove (no_history
, 0);
2618 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2620 *status
= btrace_step_no_history ();
2623 gdb_assert (eventing
!= NULL
);
2625 /* We kept threads replaying at the end of their execution history. Stop
2626 replaying EVENTING now that we are going to report its stop. */
2627 record_btrace_stop_replaying_at_end (eventing
);
2629 /* Stop all other threads. */
2630 if (!target_is_non_stop_p ())
2632 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
2633 record_btrace_cancel_resume (tp
);
2636 /* In async mode, we need to announce further events. */
2637 if (target_is_async_p ())
2638 record_btrace_maybe_mark_async_event (moving
, no_history
);
2640 /* Start record histories anew from the current position. */
2641 record_btrace_clear_histories (&eventing
->btrace
);
2643 /* We moved the replay position but did not update registers. */
2644 registers_changed_thread (eventing
);
2646 DEBUG ("wait ended by thread %s (%s): %s",
2647 print_thread_id (eventing
),
2648 target_pid_to_str (eventing
->ptid
).c_str (),
2649 target_waitstatus_to_string (status
).c_str ());
2651 return eventing
->ptid
;
2654 /* The stop method of target record-btrace. */
2657 record_btrace_target::stop (ptid_t ptid
)
2659 DEBUG ("stop %s", target_pid_to_str (ptid
).c_str ());
2661 /* As long as we're not replaying, just forward the request. */
2662 if ((::execution_direction
!= EXEC_REVERSE
)
2663 && !record_is_replaying (minus_one_ptid
))
2665 this->beneath ()->stop (ptid
);
2669 process_stratum_target
*proc_target
2670 = current_inferior ()->process_target ();
2672 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2674 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2675 tp
->btrace
.flags
|= BTHR_STOP
;
2680 /* The can_execute_reverse method of target record-btrace. */
2683 record_btrace_target::can_execute_reverse ()
2688 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2691 record_btrace_target::stopped_by_sw_breakpoint ()
2693 if (record_is_replaying (minus_one_ptid
))
2695 struct thread_info
*tp
= inferior_thread ();
2697 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2700 return this->beneath ()->stopped_by_sw_breakpoint ();
2703 /* The supports_stopped_by_sw_breakpoint method of target
2707 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2709 if (record_is_replaying (minus_one_ptid
))
2712 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
/* The stopped_by_hw_breakpoint method of target record-btrace.
   (The original header comment said "sw" — copy-paste slip.)  */

bool
record_btrace_target::stopped_by_hw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, consult the stop reason recorded in the
	 current thread's branch trace info.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  /* Not replaying — forward the request to the target beneath.  */
  return this->beneath ()->stopped_by_hw_breakpoint ();
}
2730 /* The supports_stopped_by_hw_breakpoint method of target
2734 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2736 if (record_is_replaying (minus_one_ptid
))
2739 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2742 /* The update_thread_list method of target record-btrace. */
2745 record_btrace_target::update_thread_list ()
2747 /* We don't add or remove threads during replay. */
2748 if (record_is_replaying (minus_one_ptid
))
2751 /* Forward the request. */
2752 this->beneath ()->update_thread_list ();
2755 /* The thread_alive method of target record-btrace. */
2758 record_btrace_target::thread_alive (ptid_t ptid
)
2760 /* We don't add or remove threads during replay. */
2761 if (record_is_replaying (minus_one_ptid
))
2764 /* Forward the request. */
2765 return this->beneath ()->thread_alive (ptid
);
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   stops.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Start replaying if TP wasn't already; if it was and is already at
	 IT, there is nothing to do.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      /* Move the replay iterator; registers must be re-fetched for the
	 new position.  */
      *btinfo->replay = *it;
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  /* Update the cached stop PC and show the frame at the new position.  */
  inferior_thread ()->suspend.stop_pc
    = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
/* The goto_record_begin method of target record-btrace.  */

void
record_btrace_target::goto_record_begin ()
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      /* If we cannot advance past the gap, the trace contains no
	 instructions at all.  */
      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}
2825 /* The goto_record_end method of target record-btrace. */
2828 record_btrace_target::goto_record_end ()
2830 struct thread_info
*tp
;
2832 tp
= require_btrace_thread ();
2834 record_btrace_set_replay (tp
, NULL
);
/* The goto_record method of target record-btrace.  */

void
record_btrace_target::goto_record (ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN is wider than NUMBER, so the narrowing
     assignment above loses information for out-of-range values.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2864 /* The record_stop_replaying method of target record-btrace. */
2867 record_btrace_target::record_stop_replaying ()
2869 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
2870 record_btrace_stop_replaying (tp
);
2873 /* The execution_direction target method. */
2875 enum exec_direction_kind
2876 record_btrace_target::execution_direction ()
2878 return record_btrace_resume_exec_dir
;
2881 /* The prepare_to_generate_core target method. */
2884 record_btrace_target::prepare_to_generate_core ()
2886 record_btrace_generating_corefile
= 1;
2889 /* The done_generating_core target method. */
2892 record_btrace_target::done_generating_core ()
2894 record_btrace_generating_corefile
= 0;
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the requested format before re-raising the error so a
	 failed attempt leaves no stale configuration behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the requested format before re-raising the error so a
	 failed attempt leaves no stale configuration behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
/* Alias for "target record".  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Try the Intel Processor Trace format first; fall back to BTS if
     that fails; reset the format and re-raise if neither works.  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
	{
	  execute_command ("target record-btrace", from_tty);
	}
      catch (const gdb_exception &ex)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw;
	}
    }
}
2969 /* The "show record btrace replay-memory-access" command. */
2972 cmd_show_replay_memory_access (struct ui_file
*file
, int from_tty
,
2973 struct cmd_list_element
*c
, const char *value
)
2975 fprintf_filtered (gdb_stdout
, _("Replay memory access is %s.\n"),
2976 replay_memory_access
);
2979 /* The "set record btrace cpu none" command. */
2982 cmd_set_record_btrace_cpu_none (const char *args
, int from_tty
)
2984 if (args
!= nullptr && *args
!= 0)
2985 error (_("Trailing junk: '%s'."), args
);
2987 record_btrace_cpu_state
= CS_NONE
;
2990 /* The "set record btrace cpu auto" command. */
2993 cmd_set_record_btrace_cpu_auto (const char *args
, int from_tty
)
2995 if (args
!= nullptr && *args
!= 0)
2996 error (_("Trailing junk: '%s'."), args
);
2998 record_btrace_cpu_state
= CS_AUTO
;
/* The "set record btrace cpu" command.  */

static void
cmd_set_record_btrace_cpu (const char *args, int from_tty)
{
  if (args == nullptr)
    args = "";

  /* We use a hard-coded vendor string for now.  The %n conversions
     record how many characters were consumed so we can detect trailing
     junk after either the short (FAMILY/MODEL) or the long
     (FAMILY/MODEL/STEPPING) form.  */
  unsigned int family, model, stepping;
  int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
				&model, &l1, &stepping, &l2);
  if (matches == 3)
    {
      if (strlen (args) != l2)
	error (_("Trailing junk: '%s'."), args + l2);
    }
  else if (matches == 2)
    {
      if (strlen (args) != l1)
	error (_("Trailing junk: '%s'."), args + l1);

      /* STEPPING was not given; default it.  */
      stepping = 0;
    }
  else
    error (_("Bad format.  See \"help set record btrace cpu\"."));

  /* Range-check the parsed values against the widths of the
     record_btrace_cpu fields.  */
  if (USHRT_MAX < family)
    error (_("Cpu family too big."));

  if (UCHAR_MAX < model)
    error (_("Cpu model too big."));

  if (UCHAR_MAX < stepping)
    error (_("Cpu stepping too big."));

  record_btrace_cpu.vendor = CV_INTEL;
  record_btrace_cpu.family = family;
  record_btrace_cpu.model = model;
  record_btrace_cpu.stepping = stepping;

  record_btrace_cpu_state = CS_CPU;
}
/* The "show record btrace cpu" command.  */

static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      printf_unfiltered (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      printf_unfiltered (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  /* Omit the stepping when it is zero, matching the input
	     syntax FAMILY/MODEL[/STEPPING].  */
	  if (record_btrace_cpu.stepping == 0)
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model);
	  else
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model,
			       record_btrace_cpu.stepping);
	  return;
	}
    }

  /* Falling out of either switch means the state/vendor combination is
     not one we know how to print.  */
  error (_("Internal error: bad cpu state."));
}
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
/* Initialize btrace commands.  */

void _initialize_record_btrace ();
void
_initialize_record_btrace ()
{
  /* "record btrace" (alias "record b") and its format subcommands.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_basic_prefix_cmd ("btrace", class_support,
			_("Set record options."), &set_record_btrace_cmdlist,
			"set record btrace ", 0, &set_record_cmdlist);

  add_show_prefix_cmd ("btrace", class_support,
		       _("Show record options."), &show_record_btrace_cmdlist,
		       "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace cpu" for trace-decode errata workarounds.  */
  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  "set record btrace cpu ", 1,
		  &set_record_btrace_cmdlist);

  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" options.  */
  add_basic_prefix_cmd ("bts", class_support,
			_("Set record btrace bts options."),
			&set_record_btrace_bts_cmdlist,
			"set record btrace bts ", 0,
			&set_record_btrace_cmdlist);

  add_show_prefix_cmd ("bts", class_support,
		       _("Show record btrace bts options."),
		       &show_record_btrace_bts_cmdlist,
		       "show record btrace bts ", 0,
		       &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" options.  */
  add_basic_prefix_cmd ("pt", class_support,
			_("Set record btrace pt options."),
			&set_record_btrace_pt_cmdlist,
			"set record btrace pt ", 0,
			&set_record_btrace_cmdlist);

  add_show_prefix_cmd ("pt", class_support,
		       _("Show record btrace pt options."),
		       &show_record_btrace_pt_cmdlist,
		       "show record btrace pt ", 0,
		       &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target and set up the frame cache and default buffer
     sizes.  */
  add_target (record_btrace_target_info, record_btrace_target_open);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}