1 /* Branch trace support for GDB, the GNU debugger.
3 Copyright (C) 2013-2023 Free Software Foundation, Inc.
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
7 This file is part of GDB.
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
24 #include "record-btrace.h"
25 #include "gdbthread.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
34 #include "filenames.h"
36 #include "frame-unwind.h"
39 #include "gdbsupport/event-loop.h"
44 #include "cli/cli-style.h"
45 #include "async-event.h"
46 #include <forward_list>
/* Name and doc strings for the record-btrace target, returned by
   record_btrace_target::info () below.  NOTE(review): this extraction
   appears to have dropped the remainder of the initializer and the
   closing "};" -- confirm against the original file.  */
49 static const target_info record_btrace_target_info
= {
51 N_("Branch tracing target"),
52 N_("Collect control-flow trace and provide the execution history.")
55 /* The target_ops of record-btrace. */
57 class record_btrace_target final
: public target_ops
60 const target_info
&info () const override
61 { return record_btrace_target_info
; }
63 strata
stratum () const override
{ return record_stratum
; }
65 void close () override
;
66 void async (bool) override
;
68 void detach (inferior
*inf
, int from_tty
) override
69 { record_detach (this, inf
, from_tty
); }
71 void disconnect (const char *, int) override
;
73 void mourn_inferior () override
74 { record_mourn_inferior (this); }
77 { record_kill (this); }
79 enum record_method
record_method (ptid_t ptid
) override
;
81 void stop_recording () override
;
82 void info_record () override
;
84 void insn_history (int size
, gdb_disassembly_flags flags
) override
;
85 void insn_history_from (ULONGEST from
, int size
,
86 gdb_disassembly_flags flags
) override
;
87 void insn_history_range (ULONGEST begin
, ULONGEST end
,
88 gdb_disassembly_flags flags
) override
;
89 void call_history (int size
, record_print_flags flags
) override
;
90 void call_history_from (ULONGEST begin
, int size
, record_print_flags flags
)
92 void call_history_range (ULONGEST begin
, ULONGEST end
, record_print_flags flags
)
95 bool record_is_replaying (ptid_t ptid
) override
;
96 bool record_will_replay (ptid_t ptid
, int dir
) override
;
97 void record_stop_replaying () override
;
99 enum target_xfer_status
xfer_partial (enum target_object object
,
102 const gdb_byte
*writebuf
,
103 ULONGEST offset
, ULONGEST len
,
104 ULONGEST
*xfered_len
) override
;
106 int insert_breakpoint (struct gdbarch
*,
107 struct bp_target_info
*) override
;
108 int remove_breakpoint (struct gdbarch
*, struct bp_target_info
*,
109 enum remove_bp_reason
) override
;
111 void fetch_registers (struct regcache
*, int) override
;
113 void store_registers (struct regcache
*, int) override
;
114 void prepare_to_store (struct regcache
*) override
;
116 const struct frame_unwind
*get_unwinder () override
;
118 const struct frame_unwind
*get_tailcall_unwinder () override
;
120 void resume (ptid_t
, int, enum gdb_signal
) override
;
121 ptid_t
wait (ptid_t
, struct target_waitstatus
*, target_wait_flags
) override
;
123 void stop (ptid_t
) override
;
124 void update_thread_list () override
;
125 bool thread_alive (ptid_t ptid
) override
;
126 void goto_record_begin () override
;
127 void goto_record_end () override
;
128 void goto_record (ULONGEST insn
) override
;
130 bool can_execute_reverse () override
;
132 bool stopped_by_sw_breakpoint () override
;
133 bool supports_stopped_by_sw_breakpoint () override
;
135 bool stopped_by_hw_breakpoint () override
;
136 bool supports_stopped_by_hw_breakpoint () override
;
138 enum exec_direction_kind
execution_direction () override
;
139 void prepare_to_generate_core () override
;
140 void done_generating_core () override
;
143 static record_btrace_target record_btrace_ops
;
145 /* Initialize the record-btrace target ops. */
147 /* Token associated with a new-thread observer enabling branch tracing
148 for the new thread. */
149 static const gdb::observers::token record_btrace_thread_observer_token
{};
151 /* Memory access types used in set/show record btrace replay-memory-access. */
152 static const char replay_memory_access_read_only
[] = "read-only";
153 static const char replay_memory_access_read_write
[] = "read-write";
/* Enum-command candidate list built from the two strings above.
   NOTE(review): the opening brace and the terminating NULL entry of
   this array are missing from this extraction -- verify against the
   original file.  */
154 static const char *const replay_memory_access_types
[] =
156 replay_memory_access_read_only
,
157 replay_memory_access_read_write
,
161 /* The currently allowed replay memory access type. */
/* Defaults to read-only; consulted in xfer_partial while replaying.  */
162 static const char *replay_memory_access
= replay_memory_access_read_only
;
164 /* The cpu state kinds. */
165 enum record_btrace_cpu_state_kind
172 /* The current cpu state. */
173 static enum record_btrace_cpu_state_kind record_btrace_cpu_state
= CS_AUTO
;
175 /* The current cpu for trace decode. */
176 static struct btrace_cpu record_btrace_cpu
;
178 /* Command lists for "set/show record btrace". */
179 static struct cmd_list_element
*set_record_btrace_cmdlist
;
180 static struct cmd_list_element
*show_record_btrace_cmdlist
;
182 /* The execution direction of the last resume we got. See record-full.c. */
183 static enum exec_direction_kind record_btrace_resume_exec_dir
= EXEC_FORWARD
;
185 /* The async event handler for reverse/replay execution. */
186 static struct async_event_handler
*record_btrace_async_inferior_event_handler
;
188 /* A flag indicating that we are currently generating a core file. */
189 static int record_btrace_generating_corefile
;
191 /* The current branch trace configuration. */
192 static struct btrace_config record_btrace_conf
;
194 /* Command list for "record btrace". */
195 static struct cmd_list_element
*record_btrace_cmdlist
;
197 /* Command lists for "set/show record btrace bts". */
198 static struct cmd_list_element
*set_record_btrace_bts_cmdlist
;
199 static struct cmd_list_element
*show_record_btrace_bts_cmdlist
;
201 /* Command lists for "set/show record btrace pt". */
202 static struct cmd_list_element
*set_record_btrace_pt_cmdlist
;
203 static struct cmd_list_element
*show_record_btrace_pt_cmdlist
;
205 /* Command list for "set record btrace cpu". */
206 static struct cmd_list_element
*set_record_btrace_cpu_cmdlist
;
208 /* Print a record-btrace debug message. Use do ... while (0) to avoid
209 ambiguities when used in if statements. */
/* NOTE(review): the "do {" / "} while (0)" wrapper lines that the
   comment above refers to are missing from this extraction; only the
   guarded gdb_printf call survives.  Confirm the full macro body
   against the original file before building.  */
211 #define DEBUG(msg, args...) \
214 if (record_debug != 0) \
215 gdb_printf (gdb_stdlog, \
216 "[record-btrace] " msg "\n", ##args); \
221 /* Return the cpu configured by the user. Returns NULL if the cpu was
222 configured as auto. */
/* NOTE(review): the switch below has lost its case labels in this
   extraction (presumably CS_AUTO -> return NULL, CS_NONE -> force
   CV_UNKNOWN, CS_CPU -> return the configured cpu); only fragments of
   the CS_NONE handling, one return, and the trailing internal-error
   call remain.  Confirm the full switch against the original file.  */
223 const struct btrace_cpu
*
224 record_btrace_get_cpu (void)
226 switch (record_btrace_cpu_state
)
232 record_btrace_cpu
.vendor
= CV_UNKNOWN
;
235 return &record_btrace_cpu
;
238 error (_("Internal error: bad record btrace cpu state."));
241 /* Update the branch trace for the current thread and return a pointer to its
244 Throws an error if there is no thread or no trace. This function never
/* Fetches the trace for the current thread (erroring out when there is
   no selected thread or the trace is empty).  NOTE(review): the final
   "return tp;" and the function braces are missing from this
   extraction -- confirm against the original file.  */
247 static struct thread_info
*
248 require_btrace_thread (void)
252 if (inferior_ptid
== null_ptid
)
253 error (_("No thread."));
255 thread_info
*tp
= inferior_thread ();
257 validate_registers_access ();
259 btrace_fetch (tp
, record_btrace_get_cpu ());
261 if (btrace_is_empty (tp
))
262 error (_("No trace."));
267 /* Update the branch trace for the current thread and return a pointer to its
268 branch trace information struct.
270 Throws an error if there is no thread or no trace. This function never
/* Thin wrapper over require_btrace_thread ().  NOTE(review): the final
   "return &tp->btrace;" statement and the function braces are missing
   from this extraction -- confirm against the original file.  */
273 static struct btrace_thread_info
*
274 require_btrace (void)
276 struct thread_info
*tp
;
278 tp
= require_btrace_thread ();
283 /* The new thread observer. */
286 record_btrace_on_new_thread (struct thread_info
*tp
)
288 /* Ignore this thread if its inferior is not recorded by us. */
289 target_ops
*rec
= tp
->inf
->target_at (record_stratum
);
290 if (rec
!= &record_btrace_ops
)
295 btrace_enable (tp
, &record_btrace_conf
);
297 catch (const gdb_exception_error
&error
)
299 warning ("%s", error
.what ());
303 /* Enable automatic tracing of new threads. */
306 record_btrace_auto_enable (void)
308 DEBUG ("attach thread observer");
310 gdb::observers::new_thread
.attach (record_btrace_on_new_thread
,
311 record_btrace_thread_observer_token
,
315 /* Disable automatic tracing of new threads. */
318 record_btrace_auto_disable (void)
320 DEBUG ("detach thread observer");
322 gdb::observers::new_thread
.detach (record_btrace_thread_observer_token
);
325 /* The record-btrace async event handler function. */
328 record_btrace_handle_async_inferior_event (gdb_client_data data
)
330 inferior_event_handler (INF_REG_EVENT
);
333 /* See record-btrace.h. */
336 record_btrace_push_target (void)
340 record_btrace_auto_enable ();
342 current_inferior ()->push_target (&record_btrace_ops
);
344 record_btrace_async_inferior_event_handler
345 = create_async_event_handler (record_btrace_handle_async_inferior_event
,
346 NULL
, "record-btrace");
347 record_btrace_generating_corefile
= 0;
349 format
= btrace_format_short_string (record_btrace_conf
.format
);
350 gdb::observers::record_changed
.notify (current_inferior (), 1, "btrace", format
);
353 /* Disable btrace on a set of threads on scope exit. */
355 struct scoped_btrace_disable
357 scoped_btrace_disable () = default;
359 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable
);
361 ~scoped_btrace_disable ()
363 for (thread_info
*tp
: m_threads
)
367 void add_thread (thread_info
*thread
)
369 m_threads
.push_front (thread
);
378 std::forward_list
<thread_info
*> m_threads
;
381 /* Open target record-btrace. */
384 record_btrace_target_open (const char *args
, int from_tty
)
386 /* If we fail to enable btrace for one thread, disable it for the threads for
387 which it was successfully enabled. */
388 scoped_btrace_disable btrace_disable
;
394 if (!target_has_execution ())
395 error (_("The program is not being run."));
397 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
398 if (args
== NULL
|| *args
== 0 || number_is_in_list (args
, tp
->global_num
))
400 btrace_enable (tp
, &record_btrace_conf
);
402 btrace_disable
.add_thread (tp
);
405 record_btrace_push_target ();
407 btrace_disable
.discard ();
410 /* The stop_recording method of target record-btrace. */
413 record_btrace_target::stop_recording ()
415 DEBUG ("stop recording");
417 record_btrace_auto_disable ();
419 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
420 if (tp
->btrace
.target
!= NULL
)
424 /* The disconnect method of target record-btrace. */
427 record_btrace_target::disconnect (const char *args
,
430 struct target_ops
*beneath
= this->beneath ();
432 /* Do not stop recording, just clean up GDB side. */
433 current_inferior ()->unpush_target (this);
435 /* Forward disconnect. */
436 beneath
->disconnect (args
, from_tty
);
439 /* The close method of target record-btrace. */
442 record_btrace_target::close ()
444 if (record_btrace_async_inferior_event_handler
!= NULL
)
445 delete_async_event_handler (&record_btrace_async_inferior_event_handler
);
447 /* Make sure automatic recording gets disabled even if we did not stop
448 recording before closing the record-btrace target. */
449 record_btrace_auto_disable ();
451 /* We should have already stopped recording.
452 Tear down btrace in case we have not. */
453 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
454 btrace_teardown (tp
);
457 /* The async method of target record-btrace. */
460 record_btrace_target::async (bool enable
)
463 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
465 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
467 this->beneath ()->async (enable
);
470 /* Adjusts the size and returns a human readable size suffix. */
473 record_btrace_adjust_size (unsigned int *size
)
479 if ((sz
& ((1u << 30) - 1)) == 0)
484 else if ((sz
& ((1u << 20) - 1)) == 0)
489 else if ((sz
& ((1u << 10) - 1)) == 0)
498 /* Print a BTS configuration. */
501 record_btrace_print_bts_conf (const struct btrace_config_bts
*conf
)
509 suffix
= record_btrace_adjust_size (&size
);
510 gdb_printf (_("Buffer size: %u%s.\n"), size
, suffix
);
514 /* Print an Intel Processor Trace configuration. */
517 record_btrace_print_pt_conf (const struct btrace_config_pt
*conf
)
525 suffix
= record_btrace_adjust_size (&size
);
526 gdb_printf (_("Buffer size: %u%s.\n"), size
, suffix
);
530 /* Print a branch tracing configuration. */
533 record_btrace_print_conf (const struct btrace_config
*conf
)
535 gdb_printf (_("Recording format: %s.\n"),
536 btrace_format_string (conf
->format
));
538 switch (conf
->format
)
540 case BTRACE_FORMAT_NONE
:
543 case BTRACE_FORMAT_BTS
:
544 record_btrace_print_bts_conf (&conf
->bts
);
547 case BTRACE_FORMAT_PT
:
548 record_btrace_print_pt_conf (&conf
->pt
);
552 internal_error (_("Unknown branch trace format."));
555 /* The info_record method of target record-btrace. */
558 record_btrace_target::info_record ()
560 struct btrace_thread_info
*btinfo
;
561 const struct btrace_config
*conf
;
562 struct thread_info
*tp
;
563 unsigned int insns
, calls
, gaps
;
567 if (inferior_ptid
== null_ptid
)
568 error (_("No thread."));
570 tp
= inferior_thread ();
572 validate_registers_access ();
574 btinfo
= &tp
->btrace
;
576 conf
= ::btrace_conf (btinfo
);
578 record_btrace_print_conf (conf
);
580 btrace_fetch (tp
, record_btrace_get_cpu ());
586 if (!btrace_is_empty (tp
))
588 struct btrace_call_iterator call
;
589 struct btrace_insn_iterator insn
;
591 btrace_call_end (&call
, btinfo
);
592 btrace_call_prev (&call
, 1);
593 calls
= btrace_call_number (&call
);
595 btrace_insn_end (&insn
, btinfo
);
596 insns
= btrace_insn_number (&insn
);
598 /* If the last instruction is not a gap, it is the current instruction
599 that is not actually part of the record. */
600 if (btrace_insn_get (&insn
) != NULL
)
603 gaps
= btinfo
->ngaps
;
606 gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
607 "for thread %s (%s).\n"), insns
, calls
, gaps
,
608 print_thread_id (tp
),
609 target_pid_to_str (tp
->ptid
).c_str ());
611 if (btrace_is_replaying (tp
))
612 gdb_printf (_("Replay in progress. At instruction %u.\n"),
613 btrace_insn_number (btinfo
->replay
));
616 /* Print a decode error. */
619 btrace_ui_out_decode_error (struct ui_out
*uiout
, int errcode
,
620 enum btrace_format format
)
622 const char *errstr
= btrace_decode_error (format
, errcode
);
624 uiout
->text (_("["));
625 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
626 if (!(format
== BTRACE_FORMAT_PT
&& errcode
> 0))
628 uiout
->text (_("decode error ("));
629 uiout
->field_signed ("errcode", errcode
);
630 uiout
->text (_("): "));
632 uiout
->text (errstr
);
633 uiout
->text (_("]\n"));
636 /* A range of source lines. */
638 struct btrace_line_range
640 /* The symtab this line is from. */
641 struct symtab
*symtab
;
643 /* The first line (inclusive). */
646 /* The last line (exclusive). */
650 /* Construct a line range. */
/* Builds a btrace_line_range value for SYMTAB covering [BEGIN, END).
   NOTE(review): the assignments of range.begin / range.end and the
   "return range;" statement are missing from this extraction --
   confirm against the original file.  */
652 static struct btrace_line_range
653 btrace_mk_line_range (struct symtab
*symtab
, int begin
, int end
)
655 struct btrace_line_range range
;
657 range
.symtab
= symtab
;
664 /* Add a line to a line range. */
/* Grows RANGE to include LINE and returns the updated range (passed
   and returned by value).  NOTE(review): the "range.begin = line;"
   assignments in the first-entry and extend-downward arms, the
   extend-upward assignment, and the final "return range;" are missing
   from this extraction -- confirm against the original file.  */
666 static struct btrace_line_range
667 btrace_line_range_add (struct btrace_line_range range
, int line
)
669 if (range
.end
<= range
.begin
)
671 /* This is the first entry. */
673 range
.end
= line
+ 1;
675 else if (line
< range
.begin
)
677 else if (range
.end
< line
)
683 /* Return non-zero if RANGE is empty, zero otherwise. */
686 btrace_line_range_is_empty (struct btrace_line_range range
)
688 return range
.end
<= range
.begin
;
691 /* Return non-zero if LHS contains RHS, zero otherwise. */
694 btrace_line_range_contains_range (struct btrace_line_range lhs
,
695 struct btrace_line_range rhs
)
697 return ((lhs
.symtab
== rhs
.symtab
)
698 && (lhs
.begin
<= rhs
.begin
)
699 && (rhs
.end
<= lhs
.end
));
702 /* Find the line range associated with PC. */
704 static struct btrace_line_range
705 btrace_find_line_range (CORE_ADDR pc
)
707 struct btrace_line_range range
;
708 const linetable_entry
*lines
;
709 const linetable
*ltable
;
710 struct symtab
*symtab
;
713 symtab
= find_pc_line_symtab (pc
);
715 return btrace_mk_line_range (NULL
, 0, 0);
717 ltable
= symtab
->linetable ();
719 return btrace_mk_line_range (symtab
, 0, 0);
721 nlines
= ltable
->nitems
;
722 lines
= ltable
->item
;
724 return btrace_mk_line_range (symtab
, 0, 0);
726 struct objfile
*objfile
= symtab
->compunit ()->objfile ();
727 unrelocated_addr unrel_pc
728 = unrelocated_addr (pc
- objfile
->text_section_offset ());
730 range
= btrace_mk_line_range (symtab
, 0, 0);
731 for (i
= 0; i
< nlines
- 1; i
++)
733 /* The test of is_stmt here was added when the is_stmt field was
734 introduced to the 'struct linetable_entry' structure. This
735 ensured that this loop maintained the same behaviour as before we
736 introduced is_stmt. That said, it might be that we would be
737 better off not checking is_stmt here, this would lead to us
738 possibly adding more line numbers to the range. At the time this
739 change was made I was unsure how to test this so chose to go with
740 maintaining the existing experience. */
741 if (lines
[i
].raw_pc () == unrel_pc
&& lines
[i
].line
!= 0
743 range
= btrace_line_range_add (range
, lines
[i
].line
);
749 /* Print source lines in LINES to UIOUT.
751 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
752 instructions corresponding to that source line. When printing a new source
753 line, we do the cleanups for the open chain and open a new cleanup chain for
754 the new source line. If the source line range in LINES is not empty, this
755 function will leave the cleanup chain for the last printed source line open
756 so instructions can be added to it. */
759 btrace_print_lines (struct btrace_line_range lines
, struct ui_out
*uiout
,
760 gdb::optional
<ui_out_emit_tuple
> *src_and_asm_tuple
,
761 gdb::optional
<ui_out_emit_list
> *asm_list
,
762 gdb_disassembly_flags flags
)
764 print_source_lines_flags psl_flags
;
766 if (flags
& DISASSEMBLY_FILENAME
)
767 psl_flags
|= PRINT_SOURCE_LINES_FILENAME
;
769 for (int line
= lines
.begin
; line
< lines
.end
; ++line
)
773 src_and_asm_tuple
->emplace (uiout
, "src_and_asm_line");
775 print_source_lines (lines
.symtab
, line
, line
+ 1, psl_flags
);
777 asm_list
->emplace (uiout
, "line_asm_insn");
781 /* Disassemble a section of the recorded instruction trace. */
784 btrace_insn_history (struct ui_out
*uiout
,
785 const struct btrace_thread_info
*btinfo
,
786 const struct btrace_insn_iterator
*begin
,
787 const struct btrace_insn_iterator
*end
,
788 gdb_disassembly_flags flags
)
790 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags
,
791 btrace_insn_number (begin
), btrace_insn_number (end
));
793 flags
|= DISASSEMBLY_SPECULATIVE
;
795 struct gdbarch
*gdbarch
= target_gdbarch ();
796 btrace_line_range last_lines
= btrace_mk_line_range (NULL
, 0, 0);
798 ui_out_emit_list
list_emitter (uiout
, "asm_insns");
800 gdb::optional
<ui_out_emit_tuple
> src_and_asm_tuple
;
801 gdb::optional
<ui_out_emit_list
> asm_list
;
803 gdb_pretty_print_disassembler
disasm (gdbarch
, uiout
);
805 for (btrace_insn_iterator it
= *begin
; btrace_insn_cmp (&it
, end
) != 0;
806 btrace_insn_next (&it
, 1))
808 const struct btrace_insn
*insn
;
810 insn
= btrace_insn_get (&it
);
812 /* A NULL instruction indicates a gap in the trace. */
815 const struct btrace_config
*conf
;
817 conf
= btrace_conf (btinfo
);
819 /* We have trace so we must have a configuration. */
820 gdb_assert (conf
!= NULL
);
822 uiout
->field_fmt ("insn-number", "%u",
823 btrace_insn_number (&it
));
826 btrace_ui_out_decode_error (uiout
, btrace_insn_get_error (&it
),
831 struct disasm_insn dinsn
;
833 if ((flags
& DISASSEMBLY_SOURCE
) != 0)
835 struct btrace_line_range lines
;
837 lines
= btrace_find_line_range (insn
->pc
);
838 if (!btrace_line_range_is_empty (lines
)
839 && !btrace_line_range_contains_range (last_lines
, lines
))
841 btrace_print_lines (lines
, uiout
, &src_and_asm_tuple
, &asm_list
,
845 else if (!src_and_asm_tuple
.has_value ())
847 gdb_assert (!asm_list
.has_value ());
849 src_and_asm_tuple
.emplace (uiout
, "src_and_asm_line");
851 /* No source information. */
852 asm_list
.emplace (uiout
, "line_asm_insn");
855 gdb_assert (src_and_asm_tuple
.has_value ());
856 gdb_assert (asm_list
.has_value ());
859 memset (&dinsn
, 0, sizeof (dinsn
));
860 dinsn
.number
= btrace_insn_number (&it
);
861 dinsn
.addr
= insn
->pc
;
863 if ((insn
->flags
& BTRACE_INSN_FLAG_SPECULATIVE
) != 0)
864 dinsn
.is_speculative
= 1;
866 disasm
.pretty_print_insn (&dinsn
, flags
);
871 /* The insn_history method of target record-btrace. */
874 record_btrace_target::insn_history (int size
, gdb_disassembly_flags flags
)
876 struct btrace_thread_info
*btinfo
;
877 struct btrace_insn_history
*history
;
878 struct btrace_insn_iterator begin
, end
;
879 struct ui_out
*uiout
;
880 unsigned int context
, covered
;
882 uiout
= current_uiout
;
883 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
884 context
= abs (size
);
886 error (_("Bad record instruction-history-size."));
888 btinfo
= require_btrace ();
889 history
= btinfo
->insn_history
;
892 struct btrace_insn_iterator
*replay
;
894 DEBUG ("insn-history (0x%x): %d", (unsigned) flags
, size
);
896 /* If we're replaying, we start at the replay position. Otherwise, we
897 start at the tail of the trace. */
898 replay
= btinfo
->replay
;
902 btrace_insn_end (&begin
, btinfo
);
904 /* We start from here and expand in the requested direction. Then we
905 expand in the other direction, as well, to fill up any remaining
910 /* We want the current position covered, as well. */
911 covered
= btrace_insn_next (&end
, 1);
912 covered
+= btrace_insn_prev (&begin
, context
- covered
);
913 covered
+= btrace_insn_next (&end
, context
- covered
);
917 covered
= btrace_insn_next (&end
, context
);
918 covered
+= btrace_insn_prev (&begin
, context
- covered
);
923 begin
= history
->begin
;
926 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags
, size
,
927 btrace_insn_number (&begin
), btrace_insn_number (&end
));
932 covered
= btrace_insn_prev (&begin
, context
);
937 covered
= btrace_insn_next (&end
, context
);
942 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
946 gdb_printf (_("At the start of the branch trace record.\n"));
948 gdb_printf (_("At the end of the branch trace record.\n"));
951 btrace_set_insn_history (btinfo
, &begin
, &end
);
954 /* The insn_history_range method of target record-btrace. */
957 record_btrace_target::insn_history_range (ULONGEST from
, ULONGEST to
,
958 gdb_disassembly_flags flags
)
960 struct btrace_thread_info
*btinfo
;
961 struct btrace_insn_iterator begin
, end
;
962 struct ui_out
*uiout
;
963 unsigned int low
, high
;
966 uiout
= current_uiout
;
967 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
971 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags
, low
, high
);
973 /* Check for wrap-arounds. */
974 if (low
!= from
|| high
!= to
)
975 error (_("Bad range."));
978 error (_("Bad range."));
980 btinfo
= require_btrace ();
982 found
= btrace_find_insn_by_number (&begin
, btinfo
, low
);
984 error (_("Range out of bounds."));
986 found
= btrace_find_insn_by_number (&end
, btinfo
, high
);
989 /* Silently truncate the range. */
990 btrace_insn_end (&end
, btinfo
);
994 /* We want both begin and end to be inclusive. */
995 btrace_insn_next (&end
, 1);
998 btrace_insn_history (uiout
, btinfo
, &begin
, &end
, flags
);
999 btrace_set_insn_history (btinfo
, &begin
, &end
);
1002 /* The insn_history_from method of target record-btrace. */
1005 record_btrace_target::insn_history_from (ULONGEST from
, int size
,
1006 gdb_disassembly_flags flags
)
1008 ULONGEST begin
, end
, context
;
1010 context
= abs (size
);
1012 error (_("Bad record instruction-history-size."));
1021 begin
= from
- context
+ 1;
1026 end
= from
+ context
- 1;
1028 /* Check for wrap-around. */
1033 insn_history_range (begin
, end
, flags
);
1036 /* Print the instruction number range for a function call history line. */
1039 btrace_call_history_insn_range (struct ui_out
*uiout
,
1040 const struct btrace_function
*bfun
)
1042 unsigned int begin
, end
, size
;
1044 size
= bfun
->insn
.size ();
1045 gdb_assert (size
> 0);
1047 begin
= bfun
->insn_offset
;
1048 end
= begin
+ size
- 1;
1050 uiout
->field_unsigned ("insn begin", begin
);
1052 uiout
->field_unsigned ("insn end", end
);
1055 /* Compute the lowest and highest source line for the instructions in BFUN
1056 and return them in PBEGIN and PEND.
1057 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1058 result from inlining or macro expansion. */
1061 btrace_compute_src_line_range (const struct btrace_function
*bfun
,
1062 int *pbegin
, int *pend
)
1064 struct symtab
*symtab
;
1075 symtab
= sym
->symtab ();
1077 for (const btrace_insn
&insn
: bfun
->insn
)
1079 struct symtab_and_line sal
;
1081 sal
= find_pc_line (insn
.pc
, 0);
1082 if (sal
.symtab
!= symtab
|| sal
.line
== 0)
1085 begin
= std::min (begin
, sal
.line
);
1086 end
= std::max (end
, sal
.line
);
1094 /* Print the source line information for a function call history line. */
1097 btrace_call_history_src_line (struct ui_out
*uiout
,
1098 const struct btrace_function
*bfun
)
1107 uiout
->field_string ("file",
1108 symtab_to_filename_for_display (sym
->symtab ()),
1109 file_name_style
.style ());
1111 btrace_compute_src_line_range (bfun
, &begin
, &end
);
1116 uiout
->field_signed ("min line", begin
);
1122 uiout
->field_signed ("max line", end
);
1125 /* Get the name of a branch trace function. */
1128 btrace_get_bfun_name (const struct btrace_function
*bfun
)
1130 struct minimal_symbol
*msym
;
1140 return sym
->print_name ();
1141 else if (msym
!= NULL
)
1142 return msym
->print_name ();
1147 /* Disassemble a section of the recorded function trace. */
1150 btrace_call_history (struct ui_out
*uiout
,
1151 const struct btrace_thread_info
*btinfo
,
1152 const struct btrace_call_iterator
*begin
,
1153 const struct btrace_call_iterator
*end
,
1156 struct btrace_call_iterator it
;
1157 record_print_flags flags
= (enum record_print_flag
) int_flags
;
1159 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags
, btrace_call_number (begin
),
1160 btrace_call_number (end
));
1162 for (it
= *begin
; btrace_call_cmp (&it
, end
) < 0; btrace_call_next (&it
, 1))
1164 const struct btrace_function
*bfun
;
1165 struct minimal_symbol
*msym
;
1168 bfun
= btrace_call_get (&it
);
1172 /* Print the function index. */
1173 uiout
->field_unsigned ("index", bfun
->number
);
1176 /* Indicate gaps in the trace. */
1177 if (bfun
->errcode
!= 0)
1179 const struct btrace_config
*conf
;
1181 conf
= btrace_conf (btinfo
);
1183 /* We have trace so we must have a configuration. */
1184 gdb_assert (conf
!= NULL
);
1186 btrace_ui_out_decode_error (uiout
, bfun
->errcode
, conf
->format
);
1191 if ((flags
& RECORD_PRINT_INDENT_CALLS
) != 0)
1193 int level
= bfun
->level
+ btinfo
->level
, i
;
1195 for (i
= 0; i
< level
; ++i
)
1200 uiout
->field_string ("function", sym
->print_name (),
1201 function_name_style
.style ());
1202 else if (msym
!= NULL
)
1203 uiout
->field_string ("function", msym
->print_name (),
1204 function_name_style
.style ());
1205 else if (!uiout
->is_mi_like_p ())
1206 uiout
->field_string ("function", "??",
1207 function_name_style
.style ());
1209 if ((flags
& RECORD_PRINT_INSN_RANGE
) != 0)
1211 uiout
->text (_("\tinst "));
1212 btrace_call_history_insn_range (uiout
, bfun
);
1215 if ((flags
& RECORD_PRINT_SRC_LINE
) != 0)
1217 uiout
->text (_("\tat "));
1218 btrace_call_history_src_line (uiout
, bfun
);
1225 /* The call_history method of target record-btrace. */
1228 record_btrace_target::call_history (int size
, record_print_flags flags
)
1230 struct btrace_thread_info
*btinfo
;
1231 struct btrace_call_history
*history
;
1232 struct btrace_call_iterator begin
, end
;
1233 struct ui_out
*uiout
;
1234 unsigned int context
, covered
;
1236 uiout
= current_uiout
;
1237 ui_out_emit_tuple
tuple_emitter (uiout
, "insn history");
1238 context
= abs (size
);
1240 error (_("Bad record function-call-history-size."));
1242 btinfo
= require_btrace ();
1243 history
= btinfo
->call_history
;
1244 if (history
== NULL
)
1246 struct btrace_insn_iterator
*replay
;
1248 DEBUG ("call-history (0x%x): %d", (int) flags
, size
);
1250 /* If we're replaying, we start at the replay position. Otherwise, we
1251 start at the tail of the trace. */
1252 replay
= btinfo
->replay
;
1255 begin
.btinfo
= btinfo
;
1256 begin
.index
= replay
->call_index
;
1259 btrace_call_end (&begin
, btinfo
);
1261 /* We start from here and expand in the requested direction. Then we
1262 expand in the other direction, as well, to fill up any remaining
1267 /* We want the current position covered, as well. */
1268 covered
= btrace_call_next (&end
, 1);
1269 covered
+= btrace_call_prev (&begin
, context
- covered
);
1270 covered
+= btrace_call_next (&end
, context
- covered
);
1274 covered
= btrace_call_next (&end
, context
);
1275 covered
+= btrace_call_prev (&begin
, context
- covered
);
1280 begin
= history
->begin
;
1283 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags
, size
,
1284 btrace_call_number (&begin
), btrace_call_number (&end
));
1289 covered
= btrace_call_prev (&begin
, context
);
1294 covered
= btrace_call_next (&end
, context
);
1299 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1303 gdb_printf (_("At the start of the branch trace record.\n"));
1305 gdb_printf (_("At the end of the branch trace record.\n"));
1308 btrace_set_call_history (btinfo
, &begin
, &end
);
1311 /* The call_history_range method of target record-btrace. */
1314 record_btrace_target::call_history_range (ULONGEST from
, ULONGEST to
,
1315 record_print_flags flags
)
1317 struct btrace_thread_info
*btinfo
;
1318 struct btrace_call_iterator begin
, end
;
1319 struct ui_out
*uiout
;
1320 unsigned int low
, high
;
1323 uiout
= current_uiout
;
1324 ui_out_emit_tuple
tuple_emitter (uiout
, "func history");
1328 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags
, low
, high
);
1330 /* Check for wrap-arounds. */
1331 if (low
!= from
|| high
!= to
)
1332 error (_("Bad range."));
1335 error (_("Bad range."));
1337 btinfo
= require_btrace ();
1339 found
= btrace_find_call_by_number (&begin
, btinfo
, low
);
1341 error (_("Range out of bounds."));
1343 found
= btrace_find_call_by_number (&end
, btinfo
, high
);
1346 /* Silently truncate the range. */
1347 btrace_call_end (&end
, btinfo
);
1351 /* We want both begin and end to be inclusive. */
1352 btrace_call_next (&end
, 1);
1355 btrace_call_history (uiout
, btinfo
, &begin
, &end
, flags
);
1356 btrace_set_call_history (btinfo
, &begin
, &end
);
1359 /* The call_history_from method of target record-btrace. */
1362 record_btrace_target::call_history_from (ULONGEST from
, int size
,
1363 record_print_flags flags
)
1365 ULONGEST begin
, end
, context
;
1367 context
= abs (size
);
1369 error (_("Bad record function-call-history-size."));
1378 begin
= from
- context
+ 1;
1383 end
= from
+ context
- 1;
1385 /* Check for wrap-around. */
1390 call_history_range ( begin
, end
, flags
);
1393 /* The record_method method of target record-btrace. */
/* Reports whether PTID is recorded by btrace.  NOTE(review): this
   extraction is missing the "enum record_method" return-type line and
   the "if (tp == NULL)" guard that should precede the "No thread."
   error below -- confirm against the original file.  */
1396 record_btrace_target::record_method (ptid_t ptid
)
1398 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1399 thread_info
*const tp
= proc_target
->find_thread (ptid
);
1402 error (_("No thread."));
1404 if (tp
->btrace
.target
== NULL
)
1405 return RECORD_METHOD_NONE
;
1407 return RECORD_METHOD_BTRACE
;
1410 /* The record_is_replaying method of target record-btrace. */
/* Returns whether any non-exited thread matching PTID is currently
   replaying.  NOTE(review): the "bool" return-type line and the
   "return true;" / "return false;" statements are missing from this
   extraction -- confirm against the original file.  */
1413 record_btrace_target::record_is_replaying (ptid_t ptid
)
1415 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
1416 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
1417 if (btrace_is_replaying (tp
))
1423 /* The record_will_replay method of target record-btrace. */
1426 record_btrace_target::record_will_replay (ptid_t ptid
, int dir
)
1428 return dir
== EXEC_REVERSE
|| record_is_replaying (ptid
);
1431 /* The xfer_partial method of target record-btrace. */
1433 enum target_xfer_status
1434 record_btrace_target::xfer_partial (enum target_object object
,
1435 const char *annex
, gdb_byte
*readbuf
,
1436 const gdb_byte
*writebuf
, ULONGEST offset
,
1437 ULONGEST len
, ULONGEST
*xfered_len
)
1439 /* Filter out requests that don't make sense during replay. */
1440 if (replay_memory_access
== replay_memory_access_read_only
1441 && !record_btrace_generating_corefile
1442 && record_is_replaying (inferior_ptid
))
1446 case TARGET_OBJECT_MEMORY
:
1448 const struct target_section
*section
;
1450 /* We do not allow writing memory in general. */
1451 if (writebuf
!= NULL
)
1454 return TARGET_XFER_UNAVAILABLE
;
1457 /* We allow reading readonly memory. */
1458 section
= target_section_by_addr (this, offset
);
1459 if (section
!= NULL
)
1461 /* Check if the section we found is readonly. */
1462 if ((bfd_section_flags (section
->the_bfd_section
)
1463 & SEC_READONLY
) != 0)
1465 /* Truncate the request to fit into this section. */
1466 len
= std::min (len
, section
->endaddr
- offset
);
1472 return TARGET_XFER_UNAVAILABLE
;
1477 /* Forward the request. */
1478 return this->beneath ()->xfer_partial (object
, annex
, readbuf
, writebuf
,
1479 offset
, len
, xfered_len
);
1482 /* The insert_breakpoint method of target record-btrace. */
1485 record_btrace_target::insert_breakpoint (struct gdbarch
*gdbarch
,
1486 struct bp_target_info
*bp_tgt
)
1491 /* Inserting breakpoints requires accessing memory. Allow it for the
1492 duration of this function. */
1493 old
= replay_memory_access
;
1494 replay_memory_access
= replay_memory_access_read_write
;
1499 ret
= this->beneath ()->insert_breakpoint (gdbarch
, bp_tgt
);
1501 catch (const gdb_exception
&except
)
1503 replay_memory_access
= old
;
1506 replay_memory_access
= old
;
1511 /* The remove_breakpoint method of target record-btrace. */
1514 record_btrace_target::remove_breakpoint (struct gdbarch
*gdbarch
,
1515 struct bp_target_info
*bp_tgt
,
1516 enum remove_bp_reason reason
)
1521 /* Removing breakpoints requires accessing memory. Allow it for the
1522 duration of this function. */
1523 old
= replay_memory_access
;
1524 replay_memory_access
= replay_memory_access_read_write
;
1529 ret
= this->beneath ()->remove_breakpoint (gdbarch
, bp_tgt
, reason
);
1531 catch (const gdb_exception
&except
)
1533 replay_memory_access
= old
;
1536 replay_memory_access
= old
;
1541 /* The fetch_registers method of target record-btrace. */
1544 record_btrace_target::fetch_registers (struct regcache
*regcache
, int regno
)
1546 btrace_insn_iterator
*replay
= nullptr;
1548 /* Thread-db may ask for a thread's registers before GDB knows about the
1549 thread. We forward the request to the target beneath in this
1551 thread_info
*tp
= regcache
->target ()->find_thread (regcache
->ptid ());
1553 replay
= tp
->btrace
.replay
;
1555 if (replay
!= nullptr && !record_btrace_generating_corefile
)
1557 const struct btrace_insn
*insn
;
1558 struct gdbarch
*gdbarch
;
1561 gdbarch
= regcache
->arch ();
1562 pcreg
= gdbarch_pc_regnum (gdbarch
);
1566 /* We can only provide the PC register. */
1567 if (regno
>= 0 && regno
!= pcreg
)
1570 insn
= btrace_insn_get (replay
);
1571 gdb_assert (insn
!= NULL
);
1573 regcache
->raw_supply (regno
, &insn
->pc
);
1576 this->beneath ()->fetch_registers (regcache
, regno
);
1579 /* The store_registers method of target record-btrace. */
1582 record_btrace_target::store_registers (struct regcache
*regcache
, int regno
)
1584 if (!record_btrace_generating_corefile
1585 && record_is_replaying (regcache
->ptid ()))
1586 error (_("Cannot write registers while replaying."));
1588 gdb_assert (may_write_registers
);
1590 this->beneath ()->store_registers (regcache
, regno
);
1593 /* The prepare_to_store method of target record-btrace. */
1596 record_btrace_target::prepare_to_store (struct regcache
*regcache
)
1598 if (!record_btrace_generating_corefile
1599 && record_is_replaying (regcache
->ptid ()))
1602 this->beneath ()->prepare_to_store (regcache
);
1605 /* The branch trace frame cache. */
1607 struct btrace_frame_cache
1610 struct thread_info
*tp
;
1612 /* The frame info. */
1615 /* The branch trace function segment. */
1616 const struct btrace_function
*bfun
;
1619 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1621 static htab_t bfcache
;
1623 /* hash_f for htab_create_alloc of bfcache. */
1626 bfcache_hash (const void *arg
)
1628 const struct btrace_frame_cache
*cache
1629 = (const struct btrace_frame_cache
*) arg
;
1631 return htab_hash_pointer (cache
->frame
);
1634 /* eq_f for htab_create_alloc of bfcache. */
1637 bfcache_eq (const void *arg1
, const void *arg2
)
1639 const struct btrace_frame_cache
*cache1
1640 = (const struct btrace_frame_cache
*) arg1
;
1641 const struct btrace_frame_cache
*cache2
1642 = (const struct btrace_frame_cache
*) arg2
;
1644 return cache1
->frame
== cache2
->frame
;
1647 /* Create a new btrace frame cache. */
1649 static struct btrace_frame_cache
*
1650 bfcache_new (frame_info_ptr frame
)
1652 struct btrace_frame_cache
*cache
;
1655 cache
= FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache
);
1656 cache
->frame
= frame
.get ();
1658 slot
= htab_find_slot (bfcache
, cache
, INSERT
);
1659 gdb_assert (*slot
== NULL
);
1665 /* Extract the branch trace function from a branch trace frame. */
1667 static const struct btrace_function
*
1668 btrace_get_frame_function (frame_info_ptr frame
)
1670 const struct btrace_frame_cache
*cache
;
1671 struct btrace_frame_cache pattern
;
1674 pattern
.frame
= frame
.get ();
1676 slot
= htab_find_slot (bfcache
, &pattern
, NO_INSERT
);
1680 cache
= (const struct btrace_frame_cache
*) *slot
;
1684 /* Implement stop_reason method for record_btrace_frame_unwind. */
1686 static enum unwind_stop_reason
1687 record_btrace_frame_unwind_stop_reason (frame_info_ptr this_frame
,
1690 const struct btrace_frame_cache
*cache
;
1691 const struct btrace_function
*bfun
;
1693 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1695 gdb_assert (bfun
!= NULL
);
1698 return UNWIND_UNAVAILABLE
;
1700 return UNWIND_NO_REASON
;
1703 /* Implement this_id method for record_btrace_frame_unwind. */
1706 record_btrace_frame_this_id (frame_info_ptr this_frame
, void **this_cache
,
1707 struct frame_id
*this_id
)
1709 const struct btrace_frame_cache
*cache
;
1710 const struct btrace_function
*bfun
;
1711 struct btrace_call_iterator it
;
1712 CORE_ADDR code
, special
;
1714 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1717 gdb_assert (bfun
!= NULL
);
1719 while (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->prev
) != 0)
1720 bfun
= btrace_call_get (&it
);
1722 code
= get_frame_func (this_frame
);
1723 special
= bfun
->number
;
1725 *this_id
= frame_id_build_unavailable_stack_special (code
, special
);
1727 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1728 btrace_get_bfun_name (cache
->bfun
),
1729 core_addr_to_string_nz (this_id
->code_addr
),
1730 core_addr_to_string_nz (this_id
->special_addr
));
1733 /* Implement prev_register method for record_btrace_frame_unwind. */
1735 static struct value
*
1736 record_btrace_frame_prev_register (frame_info_ptr this_frame
,
1740 const struct btrace_frame_cache
*cache
;
1741 const struct btrace_function
*bfun
, *caller
;
1742 struct btrace_call_iterator it
;
1743 struct gdbarch
*gdbarch
;
1747 gdbarch
= get_frame_arch (this_frame
);
1748 pcreg
= gdbarch_pc_regnum (gdbarch
);
1749 if (pcreg
< 0 || regnum
!= pcreg
)
1750 throw_error (NOT_AVAILABLE_ERROR
,
1751 _("Registers are not available in btrace record history"));
1753 cache
= (const struct btrace_frame_cache
*) *this_cache
;
1755 gdb_assert (bfun
!= NULL
);
1757 if (btrace_find_call_by_number (&it
, &cache
->tp
->btrace
, bfun
->up
) == 0)
1758 throw_error (NOT_AVAILABLE_ERROR
,
1759 _("No caller in btrace record history"));
1761 caller
= btrace_call_get (&it
);
1763 if ((bfun
->flags
& BFUN_UP_LINKS_TO_RET
) != 0)
1764 pc
= caller
->insn
.front ().pc
;
1767 pc
= caller
->insn
.back ().pc
;
1768 pc
+= gdb_insn_length (gdbarch
, pc
);
1771 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1772 btrace_get_bfun_name (bfun
), bfun
->level
,
1773 core_addr_to_string_nz (pc
));
1775 return frame_unwind_got_address (this_frame
, regnum
, pc
);
1778 /* Implement sniffer method for record_btrace_frame_unwind. */
1781 record_btrace_frame_sniffer (const struct frame_unwind
*self
,
1782 frame_info_ptr this_frame
,
1785 const struct btrace_function
*bfun
;
1786 struct btrace_frame_cache
*cache
;
1787 struct thread_info
*tp
;
1788 frame_info_ptr next
;
1790 /* THIS_FRAME does not contain a reference to its thread. */
1791 tp
= inferior_thread ();
1794 next
= get_next_frame (this_frame
);
1797 const struct btrace_insn_iterator
*replay
;
1799 replay
= tp
->btrace
.replay
;
1801 bfun
= &replay
->btinfo
->functions
[replay
->call_index
];
1805 const struct btrace_function
*callee
;
1806 struct btrace_call_iterator it
;
1808 callee
= btrace_get_frame_function (next
);
1809 if (callee
== NULL
|| (callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) != 0)
1812 if (btrace_find_call_by_number (&it
, &tp
->btrace
, callee
->up
) == 0)
1815 bfun
= btrace_call_get (&it
);
1821 DEBUG ("[frame] sniffed frame for %s on level %d",
1822 btrace_get_bfun_name (bfun
), bfun
->level
);
1824 /* This is our frame. Initialize the frame cache. */
1825 cache
= bfcache_new (this_frame
);
1829 *this_cache
= cache
;
1833 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1836 record_btrace_tailcall_frame_sniffer (const struct frame_unwind
*self
,
1837 frame_info_ptr this_frame
,
1840 const struct btrace_function
*bfun
, *callee
;
1841 struct btrace_frame_cache
*cache
;
1842 struct btrace_call_iterator it
;
1843 frame_info_ptr next
;
1844 struct thread_info
*tinfo
;
1846 next
= get_next_frame (this_frame
);
1850 callee
= btrace_get_frame_function (next
);
1854 if ((callee
->flags
& BFUN_UP_LINKS_TO_TAILCALL
) == 0)
1857 tinfo
= inferior_thread ();
1858 if (btrace_find_call_by_number (&it
, &tinfo
->btrace
, callee
->up
) == 0)
1861 bfun
= btrace_call_get (&it
);
1863 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1864 btrace_get_bfun_name (bfun
), bfun
->level
);
1866 /* This is our frame. Initialize the frame cache. */
1867 cache
= bfcache_new (this_frame
);
1871 *this_cache
= cache
;
1876 record_btrace_frame_dealloc_cache (frame_info
*self
, void *this_cache
)
1878 struct btrace_frame_cache
*cache
;
1881 cache
= (struct btrace_frame_cache
*) this_cache
;
1883 slot
= htab_find_slot (bfcache
, cache
, NO_INSERT
);
1884 gdb_assert (slot
!= NULL
);
1886 htab_remove_elt (bfcache
, cache
);
1889 /* btrace recording does not store previous memory content, neither the stack
1890 frames content. Any unwinding would return erroneous results as the stack
1891 contents no longer matches the changed PC value restored from history.
1892 Therefore this unwinder reports any possibly unwound registers as
1895 const struct frame_unwind record_btrace_frame_unwind
=
1899 record_btrace_frame_unwind_stop_reason
,
1900 record_btrace_frame_this_id
,
1901 record_btrace_frame_prev_register
,
1903 record_btrace_frame_sniffer
,
1904 record_btrace_frame_dealloc_cache
1907 const struct frame_unwind record_btrace_tailcall_frame_unwind
=
1909 "record-btrace tailcall",
1911 record_btrace_frame_unwind_stop_reason
,
1912 record_btrace_frame_this_id
,
1913 record_btrace_frame_prev_register
,
1915 record_btrace_tailcall_frame_sniffer
,
1916 record_btrace_frame_dealloc_cache
1919 /* Implement the get_unwinder method. */
1921 const struct frame_unwind
*
1922 record_btrace_target::get_unwinder ()
1924 return &record_btrace_frame_unwind
;
1927 /* Implement the get_tailcall_unwinder method. */
1929 const struct frame_unwind
*
1930 record_btrace_target::get_tailcall_unwinder ()
1932 return &record_btrace_tailcall_frame_unwind
;
1935 /* Return a human-readable string for FLAG. */
1938 btrace_thread_flag_to_str (btrace_thread_flags flag
)
1946 return "reverse-step";
1952 return "reverse-cont";
1961 /* Indicate that TP should be resumed according to FLAG. */
1964 record_btrace_resume_thread (struct thread_info
*tp
,
1965 enum btrace_thread_flag flag
)
1967 struct btrace_thread_info
*btinfo
;
1969 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp
),
1970 tp
->ptid
.to_string ().c_str (), flag
,
1971 btrace_thread_flag_to_str (flag
));
1973 btinfo
= &tp
->btrace
;
1975 /* Fetch the latest branch trace. */
1976 btrace_fetch (tp
, record_btrace_get_cpu ());
1978 /* A resume request overwrites a preceding resume or stop request. */
1979 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
1980 btinfo
->flags
|= flag
;
1983 /* Get the current frame for TP. */
1985 static struct frame_id
1986 get_thread_current_frame_id (struct thread_info
*tp
)
1988 /* Set current thread, which is implicitly used by
1989 get_current_frame. */
1990 scoped_restore_current_thread restore_thread
;
1992 switch_to_thread (tp
);
1994 process_stratum_target
*proc_target
= tp
->inf
->process_target ();
1996 /* Clear the executing flag to allow changes to the current frame.
1997 We are not actually running, yet. We just started a reverse execution
1998 command or a record goto command.
1999 For the latter, EXECUTING is false and this has no effect.
2000 For the former, EXECUTING is true and we're in wait, about to
2001 move the thread. Since we need to recompute the stack, we temporarily
2002 set EXECUTING to false. */
2003 bool executing
= tp
->executing ();
2004 set_executing (proc_target
, inferior_ptid
, false);
2007 set_executing (proc_target
, inferior_ptid
, executing
);
2009 return get_frame_id (get_current_frame ());
2012 /* Start replaying a thread. */
2014 static struct btrace_insn_iterator
*
2015 record_btrace_start_replaying (struct thread_info
*tp
)
2017 struct btrace_insn_iterator
*replay
;
2018 struct btrace_thread_info
*btinfo
;
2020 btinfo
= &tp
->btrace
;
2023 /* We can't start replaying without trace. */
2024 if (btinfo
->functions
.empty ())
2025 error (_("No trace."));
2027 /* GDB stores the current frame_id when stepping in order to detects steps
2029 Since frames are computed differently when we're replaying, we need to
2030 recompute those stored frames and fix them up so we can still detect
2031 subroutines after we started replaying. */
2034 struct frame_id frame_id
;
2035 int upd_step_frame_id
, upd_step_stack_frame_id
;
2037 /* The current frame without replaying - computed via normal unwind. */
2038 frame_id
= get_thread_current_frame_id (tp
);
2040 /* Check if we need to update any stepping-related frame id's. */
2041 upd_step_frame_id
= (frame_id
== tp
->control
.step_frame_id
);
2042 upd_step_stack_frame_id
= (frame_id
== tp
->control
.step_stack_frame_id
);
2044 /* We start replaying at the end of the branch trace. This corresponds
2045 to the current instruction. */
2046 replay
= XNEW (struct btrace_insn_iterator
);
2047 btrace_insn_end (replay
, btinfo
);
2049 /* Skip gaps at the end of the trace. */
2050 while (btrace_insn_get (replay
) == NULL
)
2054 steps
= btrace_insn_prev (replay
, 1);
2056 error (_("No trace."));
2059 /* We're not replaying, yet. */
2060 gdb_assert (btinfo
->replay
== NULL
);
2061 btinfo
->replay
= replay
;
2063 /* Make sure we're not using any stale registers. */
2064 registers_changed_thread (tp
);
2066 /* The current frame with replaying - computed via btrace unwind. */
2067 frame_id
= get_thread_current_frame_id (tp
);
2069 /* Replace stepping related frames where necessary. */
2070 if (upd_step_frame_id
)
2071 tp
->control
.step_frame_id
= frame_id
;
2072 if (upd_step_stack_frame_id
)
2073 tp
->control
.step_stack_frame_id
= frame_id
;
2075 catch (const gdb_exception
&except
)
2077 xfree (btinfo
->replay
);
2078 btinfo
->replay
= NULL
;
2080 registers_changed_thread (tp
);
2088 /* Stop replaying a thread. */
2091 record_btrace_stop_replaying (struct thread_info
*tp
)
2093 struct btrace_thread_info
*btinfo
;
2095 btinfo
= &tp
->btrace
;
2097 xfree (btinfo
->replay
);
2098 btinfo
->replay
= NULL
;
2100 /* Make sure we're not leaving any stale registers. */
2101 registers_changed_thread (tp
);
2104 /* Stop replaying TP if it is at the end of its execution history. */
2107 record_btrace_stop_replaying_at_end (struct thread_info
*tp
)
2109 struct btrace_insn_iterator
*replay
, end
;
2110 struct btrace_thread_info
*btinfo
;
2112 btinfo
= &tp
->btrace
;
2113 replay
= btinfo
->replay
;
2118 btrace_insn_end (&end
, btinfo
);
2120 if (btrace_insn_cmp (replay
, &end
) == 0)
2121 record_btrace_stop_replaying (tp
);
2124 /* The resume method of target record-btrace. */
2127 record_btrace_target::resume (ptid_t ptid
, int step
, enum gdb_signal signal
)
2129 enum btrace_thread_flag flag
, cflag
;
2131 DEBUG ("resume %s: %s%s", ptid
.to_string ().c_str (),
2132 ::execution_direction
== EXEC_REVERSE
? "reverse-" : "",
2133 step
? "step" : "cont");
2135 /* Store the execution direction of the last resume.
2137 If there is more than one resume call, we have to rely on infrun
2138 to not change the execution direction in-between. */
2139 record_btrace_resume_exec_dir
= ::execution_direction
;
2141 /* As long as we're not replaying, just forward the request.
2143 For non-stop targets this means that no thread is replaying. In order to
2144 make progress, we may need to explicitly move replaying threads to the end
2145 of their execution history. */
2146 if ((::execution_direction
!= EXEC_REVERSE
)
2147 && !record_is_replaying (minus_one_ptid
))
2149 this->beneath ()->resume (ptid
, step
, signal
);
2153 /* Compute the btrace thread flag for the requested move. */
2154 if (::execution_direction
== EXEC_REVERSE
)
2156 flag
= step
== 0 ? BTHR_RCONT
: BTHR_RSTEP
;
2161 flag
= step
== 0 ? BTHR_CONT
: BTHR_STEP
;
2165 /* We just indicate the resume intent here. The actual stepping happens in
2166 record_btrace_wait below.
2168 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2170 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2172 if (!target_is_non_stop_p ())
2174 gdb_assert (inferior_ptid
.matches (ptid
));
2176 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2178 if (tp
->ptid
.matches (inferior_ptid
))
2179 record_btrace_resume_thread (tp
, flag
);
2181 record_btrace_resume_thread (tp
, cflag
);
2186 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2187 record_btrace_resume_thread (tp
, flag
);
2190 /* Async support. */
2191 if (target_can_async_p ())
2193 target_async (true);
2194 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2198 /* Cancel resuming TP. */
2201 record_btrace_cancel_resume (struct thread_info
*tp
)
2203 btrace_thread_flags flags
;
2205 flags
= tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
);
2209 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2210 print_thread_id (tp
),
2211 tp
->ptid
.to_string ().c_str (), flags
.raw (),
2212 btrace_thread_flag_to_str (flags
));
2214 tp
->btrace
.flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2215 record_btrace_stop_replaying_at_end (tp
);
2218 /* Return a target_waitstatus indicating that we ran out of history. */
2220 static struct target_waitstatus
2221 btrace_step_no_history (void)
2223 struct target_waitstatus status
;
2225 status
.set_no_history ();
2230 /* Return a target_waitstatus indicating that a step finished. */
2232 static struct target_waitstatus
2233 btrace_step_stopped (void)
2235 struct target_waitstatus status
;
2237 status
.set_stopped (GDB_SIGNAL_TRAP
);
2242 /* Return a target_waitstatus indicating that a thread was stopped as
2245 static struct target_waitstatus
2246 btrace_step_stopped_on_request (void)
2248 struct target_waitstatus status
;
2250 status
.set_stopped (GDB_SIGNAL_0
);
2255 /* Return a target_waitstatus indicating a spurious stop. */
2257 static struct target_waitstatus
2258 btrace_step_spurious (void)
2260 struct target_waitstatus status
;
2262 status
.set_spurious ();
2267 /* Return a target_waitstatus indicating that the thread was not resumed. */
2269 static struct target_waitstatus
2270 btrace_step_no_resumed (void)
2272 struct target_waitstatus status
;
2274 status
.set_no_resumed ();
2279 /* Return a target_waitstatus indicating that we should wait again. */
2281 static struct target_waitstatus
2282 btrace_step_again (void)
2284 struct target_waitstatus status
;
2286 status
.set_ignore ();
2291 /* Clear the record histories. */
2294 record_btrace_clear_histories (struct btrace_thread_info
*btinfo
)
2296 xfree (btinfo
->insn_history
);
2297 xfree (btinfo
->call_history
);
2299 btinfo
->insn_history
= NULL
;
2300 btinfo
->call_history
= NULL
;
2303 /* Check whether TP's current replay position is at a breakpoint. */
2306 record_btrace_replay_at_breakpoint (struct thread_info
*tp
)
2308 struct btrace_insn_iterator
*replay
;
2309 struct btrace_thread_info
*btinfo
;
2310 const struct btrace_insn
*insn
;
2312 btinfo
= &tp
->btrace
;
2313 replay
= btinfo
->replay
;
2318 insn
= btrace_insn_get (replay
);
2322 return record_check_stopped_by_breakpoint (tp
->inf
->aspace
, insn
->pc
,
2323 &btinfo
->stop_reason
);
2326 /* Step one instruction in forward direction. */
2328 static struct target_waitstatus
2329 record_btrace_single_step_forward (struct thread_info
*tp
)
2331 struct btrace_insn_iterator
*replay
, end
, start
;
2332 struct btrace_thread_info
*btinfo
;
2334 btinfo
= &tp
->btrace
;
2335 replay
= btinfo
->replay
;
2337 /* We're done if we're not replaying. */
2339 return btrace_step_no_history ();
2341 /* Check if we're stepping a breakpoint. */
2342 if (record_btrace_replay_at_breakpoint (tp
))
2343 return btrace_step_stopped ();
2345 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2346 jump back to the instruction at which we started. */
2352 /* We will bail out here if we continue stepping after reaching the end
2353 of the execution history. */
2354 steps
= btrace_insn_next (replay
, 1);
2358 return btrace_step_no_history ();
2361 while (btrace_insn_get (replay
) == NULL
);
2363 /* Determine the end of the instruction trace. */
2364 btrace_insn_end (&end
, btinfo
);
2366 /* The execution trace contains (and ends with) the current instruction.
2367 This instruction has not been executed, yet, so the trace really ends
2368 one instruction earlier. */
2369 if (btrace_insn_cmp (replay
, &end
) == 0)
2370 return btrace_step_no_history ();
2372 return btrace_step_spurious ();
2375 /* Step one instruction in backward direction. */
2377 static struct target_waitstatus
2378 record_btrace_single_step_backward (struct thread_info
*tp
)
2380 struct btrace_insn_iterator
*replay
, start
;
2381 struct btrace_thread_info
*btinfo
;
2383 btinfo
= &tp
->btrace
;
2384 replay
= btinfo
->replay
;
2386 /* Start replaying if we're not already doing so. */
2388 replay
= record_btrace_start_replaying (tp
);
2390 /* If we can't step any further, we reached the end of the history.
2391 Skip gaps during replay. If we end up at a gap (at the beginning of
2392 the trace), jump back to the instruction at which we started. */
2398 steps
= btrace_insn_prev (replay
, 1);
2402 return btrace_step_no_history ();
2405 while (btrace_insn_get (replay
) == NULL
);
2407 /* Check if we're stepping a breakpoint.
2409 For reverse-stepping, this check is after the step. There is logic in
2410 infrun.c that handles reverse-stepping separately. See, for example,
2411 proceed and adjust_pc_after_break.
2413 This code assumes that for reverse-stepping, PC points to the last
2414 de-executed instruction, whereas for forward-stepping PC points to the
2415 next to-be-executed instruction. */
2416 if (record_btrace_replay_at_breakpoint (tp
))
2417 return btrace_step_stopped ();
2419 return btrace_step_spurious ();
2422 /* Step a single thread. */
2424 static struct target_waitstatus
2425 record_btrace_step_thread (struct thread_info
*tp
)
2427 struct btrace_thread_info
*btinfo
;
2428 struct target_waitstatus status
;
2429 btrace_thread_flags flags
;
2431 btinfo
= &tp
->btrace
;
2433 flags
= btinfo
->flags
& (BTHR_MOVE
| BTHR_STOP
);
2434 btinfo
->flags
&= ~(BTHR_MOVE
| BTHR_STOP
);
2436 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp
),
2437 tp
->ptid
.to_string ().c_str (), flags
.raw (),
2438 btrace_thread_flag_to_str (flags
));
2440 /* We can't step without an execution history. */
2441 if ((flags
& BTHR_MOVE
) != 0 && btrace_is_empty (tp
))
2442 return btrace_step_no_history ();
2447 internal_error (_("invalid stepping type."));
2450 return btrace_step_stopped_on_request ();
2453 status
= record_btrace_single_step_forward (tp
);
2454 if (status
.kind () != TARGET_WAITKIND_SPURIOUS
)
2457 return btrace_step_stopped ();
2460 status
= record_btrace_single_step_backward (tp
);
2461 if (status
.kind () != TARGET_WAITKIND_SPURIOUS
)
2464 return btrace_step_stopped ();
2467 status
= record_btrace_single_step_forward (tp
);
2468 if (status
.kind () != TARGET_WAITKIND_SPURIOUS
)
2471 btinfo
->flags
|= flags
;
2472 return btrace_step_again ();
2475 status
= record_btrace_single_step_backward (tp
);
2476 if (status
.kind () != TARGET_WAITKIND_SPURIOUS
)
2479 btinfo
->flags
|= flags
;
2480 return btrace_step_again ();
2483 /* We keep threads moving at the end of their execution history. The wait
2484 method will stop the thread for whom the event is reported. */
2485 if (status
.kind () == TARGET_WAITKIND_NO_HISTORY
)
2486 btinfo
->flags
|= flags
;
2491 /* Announce further events if necessary. */
2494 record_btrace_maybe_mark_async_event
2495 (const std::vector
<thread_info
*> &moving
,
2496 const std::vector
<thread_info
*> &no_history
)
2498 bool more_moving
= !moving
.empty ();
2499 bool more_no_history
= !no_history
.empty ();;
2501 if (!more_moving
&& !more_no_history
)
2505 DEBUG ("movers pending");
2507 if (more_no_history
)
2508 DEBUG ("no-history pending");
2510 mark_async_event_handler (record_btrace_async_inferior_event_handler
);
2513 /* The wait method of target record-btrace. */
2516 record_btrace_target::wait (ptid_t ptid
, struct target_waitstatus
*status
,
2517 target_wait_flags options
)
2519 std::vector
<thread_info
*> moving
;
2520 std::vector
<thread_info
*> no_history
;
2522 /* Clear this, if needed we'll re-mark it below. */
2523 clear_async_event_handler (record_btrace_async_inferior_event_handler
);
2525 DEBUG ("wait %s (0x%x)", ptid
.to_string ().c_str (),
2526 (unsigned) options
);
2528 /* As long as we're not replaying, just forward the request. */
2529 if ((::execution_direction
!= EXEC_REVERSE
)
2530 && !record_is_replaying (minus_one_ptid
))
2532 return this->beneath ()->wait (ptid
, status
, options
);
2535 /* Keep a work list of moving threads. */
2536 process_stratum_target
*proc_target
= current_inferior ()->process_target ();
2537 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2538 if ((tp
->btrace
.flags
& (BTHR_MOVE
| BTHR_STOP
)) != 0)
2539 moving
.push_back (tp
);
2541 if (moving
.empty ())
2543 *status
= btrace_step_no_resumed ();
2545 DEBUG ("wait ended by %s: %s", null_ptid
.to_string ().c_str (),
2546 status
->to_string ().c_str ());
2551 /* Step moving threads one by one, one step each, until either one thread
2552 reports an event or we run out of threads to step.
2554 When stepping more than one thread, chances are that some threads reach
2555 the end of their execution history earlier than others. If we reported
2556 this immediately, all-stop on top of non-stop would stop all threads and
2557 resume the same threads next time. And we would report the same thread
2558 having reached the end of its execution history again.
2560 In the worst case, this would starve the other threads. But even if other
2561 threads would be allowed to make progress, this would result in far too
2562 many intermediate stops.
2564 We therefore delay the reporting of "no execution history" until we have
2565 nothing else to report. By this time, all threads should have moved to
2566 either the beginning or the end of their execution history. There will
2567 be a single user-visible stop. */
2568 struct thread_info
*eventing
= NULL
;
2569 while ((eventing
== NULL
) && !moving
.empty ())
2571 for (unsigned int ix
= 0; eventing
== NULL
&& ix
< moving
.size ();)
2573 thread_info
*tp
= moving
[ix
];
2575 *status
= record_btrace_step_thread (tp
);
2577 switch (status
->kind ())
2579 case TARGET_WAITKIND_IGNORE
:
2583 case TARGET_WAITKIND_NO_HISTORY
:
2584 no_history
.push_back (ordered_remove (moving
, ix
));
2588 eventing
= unordered_remove (moving
, ix
);
2594 if (eventing
== NULL
)
2596 /* We started with at least one moving thread. This thread must have
2597 either stopped or reached the end of its execution history.
2599 In the former case, EVENTING must not be NULL.
2600 In the latter case, NO_HISTORY must not be empty. */
2601 gdb_assert (!no_history
.empty ());
2603 /* We kept threads moving at the end of their execution history. Stop
2604 EVENTING now that we are going to report its stop. */
2605 eventing
= unordered_remove (no_history
, 0);
2606 eventing
->btrace
.flags
&= ~BTHR_MOVE
;
2608 *status
= btrace_step_no_history ();
2611 gdb_assert (eventing
!= NULL
);
2613 /* We kept threads replaying at the end of their execution history. Stop
2614 replaying EVENTING now that we are going to report its stop. */
2615 record_btrace_stop_replaying_at_end (eventing
);
2617 /* Stop all other threads. */
2618 if (!target_is_non_stop_p ())
2620 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
2621 record_btrace_cancel_resume (tp
);
2624 /* In async mode, we need to announce further events. */
2625 if (target_is_async_p ())
2626 record_btrace_maybe_mark_async_event (moving
, no_history
);
2628 /* Start record histories anew from the current position. */
2629 record_btrace_clear_histories (&eventing
->btrace
);
2631 /* We moved the replay position but did not update registers. */
2632 registers_changed_thread (eventing
);
2634 DEBUG ("wait ended by thread %s (%s): %s",
2635 print_thread_id (eventing
),
2636 eventing
->ptid
.to_string ().c_str (),
2637 status
->to_string ().c_str ());
2639 return eventing
->ptid
;
2642 /* The stop method of target record-btrace. */
2645 record_btrace_target::stop (ptid_t ptid
)
2647 DEBUG ("stop %s", ptid
.to_string ().c_str ());
2649 /* As long as we're not replaying, just forward the request. */
2650 if ((::execution_direction
!= EXEC_REVERSE
)
2651 && !record_is_replaying (minus_one_ptid
))
2653 this->beneath ()->stop (ptid
);
2657 process_stratum_target
*proc_target
2658 = current_inferior ()->process_target ();
2660 for (thread_info
*tp
: all_non_exited_threads (proc_target
, ptid
))
2662 tp
->btrace
.flags
&= ~BTHR_MOVE
;
2663 tp
->btrace
.flags
|= BTHR_STOP
;
2668 /* The can_execute_reverse method of target record-btrace. */
2671 record_btrace_target::can_execute_reverse ()
2676 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2679 record_btrace_target::stopped_by_sw_breakpoint ()
2681 if (record_is_replaying (minus_one_ptid
))
2683 struct thread_info
*tp
= inferior_thread ();
2685 return tp
->btrace
.stop_reason
== TARGET_STOPPED_BY_SW_BREAKPOINT
;
2688 return this->beneath ()->stopped_by_sw_breakpoint ();
2691 /* The supports_stopped_by_sw_breakpoint method of target
2695 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2697 if (record_is_replaying (minus_one_ptid
))
2700 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
/* The stopped_by_hw_breakpoint method of target record-btrace.  */

bool
record_btrace_target::stopped_by_hw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* During replay, report the stop reason recorded for the current
	 thread's replay position.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return this->beneath ()->stopped_by_hw_breakpoint ();
}
2718 /* The supports_stopped_by_hw_breakpoint method of target
2722 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2724 if (record_is_replaying (minus_one_ptid
))
2727 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2730 /* The update_thread_list method of target record-btrace. */
2733 record_btrace_target::update_thread_list ()
2735 /* We don't add or remove threads during replay. */
2736 if (record_is_replaying (minus_one_ptid
))
2739 /* Forward the request. */
2740 this->beneath ()->update_thread_list ();
2743 /* The thread_alive method of target record-btrace. */
2746 record_btrace_target::thread_alive (ptid_t ptid
)
2748 /* We don't add or remove threads during replay. */
2749 if (record_is_replaying (minus_one_ptid
))
2752 /* Forward the request. */
2753 return this->beneath ()->thread_alive (ptid
);
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   stops.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Begin replaying if we are not already; if we already are and the
	 requested position equals the current one, there is nothing to
	 do.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      /* The replay position changed, so cached register values for this
	 thread are stale.  */
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  inferior_thread ()->set_stop_pc (regcache_read_pc (get_current_regcache ()));
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
/* The goto_record_begin method of target record-btrace.  */

void
record_btrace_target::goto_record_begin ()
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      /* If we cannot advance past the gap, the trace contains no
	 instructions at all.  */
      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}
2812 /* The goto_record_end method of target record-btrace. */
2815 record_btrace_target::goto_record_end ()
2817 struct thread_info
*tp
;
2819 tp
= require_btrace_thread ();
2821 record_btrace_set_replay (tp
, NULL
);
/* The goto_record method of target record-btrace.  */

void
record_btrace_target::goto_record (ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds: INSN is wider than NUMBER, so the narrowing
     assignment above loses information for out-of-range values.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2851 /* The record_stop_replaying method of target record-btrace. */
2854 record_btrace_target::record_stop_replaying ()
2856 for (thread_info
*tp
: current_inferior ()->non_exited_threads ())
2857 record_btrace_stop_replaying (tp
);
/* The execution_direction target method.  */

enum exec_direction_kind
record_btrace_target::execution_direction ()
{
  /* Report the direction of the most recent resume request.  */
  return record_btrace_resume_exec_dir;
}
/* The prepare_to_generate_core target method.  */

void
record_btrace_target::prepare_to_generate_core ()
{
  /* Flag that a core file is being generated; presumably this relaxes
     replay-mode memory access restrictions elsewhere in this file —
     confirm against the xfer_partial implementation.  */
  record_btrace_generating_corefile = 1;
}
/* The done_generating_core target method.  */

void
record_btrace_target::done_generating_core ()
{
  /* Core file generation finished; restore normal access rules.  */
  record_btrace_generating_corefile = 0;
}
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Undo the format selection on failure and re-throw.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Undo the format selection on failure and re-throw.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
/* Alias for "target record".  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Try the Intel Processor Trace format first.  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception_error &exception)
    {
      /* PT is not available; fall back to BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
	{
	  execute_command ("target record-btrace", from_tty);
	}
      catch (const gdb_exception &ex)
	{
	  /* Neither format worked; reset and re-throw.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw;
	}
    }
}
/* The "show record btrace replay-memory-access" command.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c, const char *value)
{
  /* Print the current replay-memory-access setting to FILE.  */
  gdb_printf (file, _("Replay memory access is %s.\n"),
	      replay_memory_access);
}
2966 /* The "set record btrace cpu none" command. */
2969 cmd_set_record_btrace_cpu_none (const char *args
, int from_tty
)
2971 if (args
!= nullptr && *args
!= 0)
2972 error (_("Trailing junk: '%s'."), args
);
2974 record_btrace_cpu_state
= CS_NONE
;
2977 /* The "set record btrace cpu auto" command. */
2980 cmd_set_record_btrace_cpu_auto (const char *args
, int from_tty
)
2982 if (args
!= nullptr && *args
!= 0)
2983 error (_("Trailing junk: '%s'."), args
);
2985 record_btrace_cpu_state
= CS_AUTO
;
/* The "set record btrace cpu" command.  */

static void
cmd_set_record_btrace_cpu (const char *args, int from_tty)
{
  if (args == nullptr)
    args = "";

  /* We use a hard-coded vendor string for now.  */
  unsigned int family, model, stepping;
  int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
				&model, &l1, &stepping, &l2);
  if (matches == 3)
    {
      /* FAMILY/MODEL/STEPPING given; L2 is the offset just past the
	 stepping field.  NOTE(review): strlen returns size_t while L2 is
	 int — mixed signed/unsigned comparison; harmless for sane input
	 lengths but worth confirming.  */
      if (strlen (args) != l2)
	error (_("Trailing junk: '%s'."), args + l2);
    }
  else if (matches == 2)
    {
      /* Only FAMILY/MODEL given; the stepping defaults to zero.  */
      if (strlen (args) != l1)
	error (_("Trailing junk: '%s'."), args + l1);

      stepping = 0;
    }
  else
    error (_("Bad format.  See \"help set record btrace cpu\"."));

  /* Range-check against the widths of the btrace cpu fields.  */
  if (USHRT_MAX < family)
    error (_("Cpu family too big."));

  if (UCHAR_MAX < model)
    error (_("Cpu model too big."));

  if (UCHAR_MAX < stepping)
    error (_("Cpu stepping too big."));

  record_btrace_cpu.vendor = CV_INTEL;
  record_btrace_cpu.family = family;
  record_btrace_cpu.model = model;
  record_btrace_cpu.stepping = stepping;

  record_btrace_cpu_state = CS_CPU;
}
/* The "show record btrace cpu" command.  */

static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      gdb_printf (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      gdb_printf (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  /* A stepping of zero means it was not specified.  */
	  if (record_btrace_cpu.stepping == 0)
	    gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
			record_btrace_cpu.family,
			record_btrace_cpu.model);
	  else
	    gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			record_btrace_cpu.family,
			record_btrace_cpu.model,
			record_btrace_cpu.stepping);
	  return;
	}
    }

  /* Falling through means an unhandled state or vendor.  */
  error (_("Internal error: bad cpu state."));
}
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  /* VALUE is the already-formatted setting string supplied by the
     set/show machinery.  */
  gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
	      value);
}
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  /* VALUE is the already-formatted setting string supplied by the
     set/show machinery.  */
  gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
	      value);
}
/* Initialize btrace commands.  */

void _initialize_record_btrace ();
void
_initialize_record_btrace ()
{
  /* "record btrace" (alias "record b") starts recording, auto-selecting
     the trace format.  */
  cmd_list_element *record_btrace_cmd
    = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		      _("Start branch trace recording."),
		      &record_btrace_cmdlist, 0, &record_cmdlist);
  add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" forces the BTS format.  */
  cmd_list_element *record_btrace_bts_cmd
    = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	       _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	       &record_btrace_cmdlist);
  add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
		 &record_cmdlist);

  /* "record btrace pt" forces the Intel Processor Trace format.  */
  cmd_list_element *record_btrace_pt_cmd
    = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	       _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	       &record_btrace_cmdlist);
  add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_setshow_prefix_cmd ("btrace", class_support,
			  _("Set record options."),
			  _("Show record options."),
			  &set_record_btrace_cmdlist,
			  &show_record_btrace_cmdlist,
			  &set_record_cmdlist, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  1,
		  &set_record_btrace_cmdlist);

  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" and its buffer-size setting.  */
  add_setshow_prefix_cmd ("bts", class_support,
			  _("Set record btrace bts options."),
			  _("Show record btrace bts options."),
			  &set_record_btrace_bts_cmdlist,
			  &show_record_btrace_bts_cmdlist,
			  &set_record_btrace_cmdlist,
			  &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" and its buffer-size setting.  */
  add_setshow_prefix_cmd ("pt", class_support,
			  _("Set record btrace pt options."),
			  _("Show record btrace pt options."),
			  &set_record_btrace_pt_cmdlist,
			  &show_record_btrace_pt_cmdlist,
			  &set_record_btrace_cmdlist,
			  &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  add_target (record_btrace_target_info, record_btrace_target_open);

  /* Cache for per-frame btrace function call information.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}