1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2023 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "gdbsupport/event-loop.h"
40 #include "inf-loop.h"
41 #include "inferior.h"
42 #include <algorithm>
43 #include "gdbarch.h"
44 #include "cli/cli-style.h"
45 #include "async-event.h"
46 #include <forward_list>
47 #include "objfiles.h"
48
49 static const target_info record_btrace_target_info = {
50 "record-btrace",
51 N_("Branch tracing target"),
52 N_("Collect control-flow trace and provide the execution history.")
53 };
54
55 /* The target_ops of record-btrace. */
56
57 class record_btrace_target final : public target_ops
58 {
59 public:
60 const target_info &info () const override
61 { return record_btrace_target_info; }
62
63 strata stratum () const override { return record_stratum; }
64
65 void close () override;
66 void async (bool) override;
67
68 void detach (inferior *inf, int from_tty) override
69 { record_detach (this, inf, from_tty); }
70
71 void disconnect (const char *, int) override;
72
73 void mourn_inferior () override
74 { record_mourn_inferior (this); }
75
76 void kill () override
77 { record_kill (this); }
78
79 enum record_method record_method (ptid_t ptid) override;
80
81 void stop_recording () override;
82 void info_record () override;
83
84 void insn_history (int size, gdb_disassembly_flags flags) override;
85 void insn_history_from (ULONGEST from, int size,
86 gdb_disassembly_flags flags) override;
87 void insn_history_range (ULONGEST begin, ULONGEST end,
88 gdb_disassembly_flags flags) override;
89 void call_history (int size, record_print_flags flags) override;
90 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
91 override;
92 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
93 override;
94
95 bool record_is_replaying (ptid_t ptid) override;
96 bool record_will_replay (ptid_t ptid, int dir) override;
97 void record_stop_replaying () override;
98
99 enum target_xfer_status xfer_partial (enum target_object object,
100 const char *annex,
101 gdb_byte *readbuf,
102 const gdb_byte *writebuf,
103 ULONGEST offset, ULONGEST len,
104 ULONGEST *xfered_len) override;
105
106 int insert_breakpoint (struct gdbarch *,
107 struct bp_target_info *) override;
108 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
109 enum remove_bp_reason) override;
110
111 void fetch_registers (struct regcache *, int) override;
112
113 void store_registers (struct regcache *, int) override;
114 void prepare_to_store (struct regcache *) override;
115
116 const struct frame_unwind *get_unwinder () override;
117
118 const struct frame_unwind *get_tailcall_unwinder () override;
119
120 void resume (ptid_t, int, enum gdb_signal) override;
121 ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;
122
123 void stop (ptid_t) override;
124 void update_thread_list () override;
125 bool thread_alive (ptid_t ptid) override;
126 void goto_record_begin () override;
127 void goto_record_end () override;
128 void goto_record (ULONGEST insn) override;
129
130 bool can_execute_reverse () override;
131
132 bool stopped_by_sw_breakpoint () override;
133 bool supports_stopped_by_sw_breakpoint () override;
134
135 bool stopped_by_hw_breakpoint () override;
136 bool supports_stopped_by_hw_breakpoint () override;
137
138 enum exec_direction_kind execution_direction () override;
139 void prepare_to_generate_core () override;
140 void done_generating_core () override;
141 };
142
143 static record_btrace_target record_btrace_ops;
144
147 /* Token associated with a new-thread observer enabling branch tracing
148 for the new thread. */
149 static const gdb::observers::token record_btrace_thread_observer_token {};
150
151 /* Memory access types used in set/show record btrace replay-memory-access. */
152 static const char replay_memory_access_read_only[] = "read-only";
153 static const char replay_memory_access_read_write[] = "read-write";
154 static const char *const replay_memory_access_types[] =
155 {
156 replay_memory_access_read_only,
157 replay_memory_access_read_write,
158 NULL
159 };
160
161 /* The currently allowed replay memory access type. */
162 static const char *replay_memory_access = replay_memory_access_read_only;
163
164 /* The cpu state kinds. */
165 enum record_btrace_cpu_state_kind
166 {
167 CS_AUTO,
168 CS_NONE,
169 CS_CPU
170 };
171
172 /* The current cpu state. */
173 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
174
175 /* The current cpu for trace decode. */
176 static struct btrace_cpu record_btrace_cpu;
177
178 /* Command lists for "set/show record btrace". */
179 static struct cmd_list_element *set_record_btrace_cmdlist;
180 static struct cmd_list_element *show_record_btrace_cmdlist;
181
182 /* The execution direction of the last resume we got. See record-full.c. */
183 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
184
185 /* The async event handler for reverse/replay execution. */
186 static struct async_event_handler *record_btrace_async_inferior_event_handler;
187
188 /* A flag indicating that we are currently generating a core file. */
189 static int record_btrace_generating_corefile;
190
191 /* The current branch trace configuration. */
192 static struct btrace_config record_btrace_conf;
193
194 /* Command list for "record btrace". */
195 static struct cmd_list_element *record_btrace_cmdlist;
196
197 /* Command lists for "set/show record btrace bts". */
198 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
199 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
200
201 /* Command lists for "set/show record btrace pt". */
202 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
203 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
204
205 /* Command list for "set record btrace cpu". */
206 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
207
208 /* Print a record-btrace debug message. Use do ... while (0) to avoid
209 ambiguities when used in if statements. */
210
211 #define DEBUG(msg, args...) \
212 do \
213 { \
214 if (record_debug != 0) \
215 gdb_printf (gdb_stdlog, \
216 "[record-btrace] " msg "\n", ##args); \
217 } \
218 while (0)
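/* Illustrative use of DEBUG (a sketch, not part of the original file):
   with "set debug record 1", a call such as

     DEBUG ("resume %s", ptid.to_string ().c_str ());

   prints "[record-btrace] resume <ptid>" to gdb_stdlog.  */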
219
220
221 /* Return the cpu configured by the user. Returns NULL if the cpu was
222 configured as auto. */
223 const struct btrace_cpu *
224 record_btrace_get_cpu (void)
225 {
226 switch (record_btrace_cpu_state)
227 {
228 case CS_AUTO:
229 return nullptr;
230
231 case CS_NONE:
232 record_btrace_cpu.vendor = CV_UNKNOWN;
233 /* Fall through. */
234 case CS_CPU:
235 return &record_btrace_cpu;
236 }
237
238 error (_("Internal error: bad record btrace cpu state."));
239 }
240
241 /* Update the branch trace for the current thread and return a pointer to its
242 thread_info.
243
244 Throws an error if there is no thread or no trace. This function never
245 returns NULL. */
246
247 static struct thread_info *
248 require_btrace_thread (void)
249 {
250 DEBUG ("require");
251
252 if (inferior_ptid == null_ptid)
253 error (_("No thread."));
254
255 thread_info *tp = inferior_thread ();
256
257 validate_registers_access ();
258
259 btrace_fetch (tp, record_btrace_get_cpu ());
260
261 if (btrace_is_empty (tp))
262 error (_("No trace."));
263
264 return tp;
265 }
266
267 /* Update the branch trace for the current thread and return a pointer to its
268 branch trace information struct.
269
270 Throws an error if there is no thread or no trace. This function never
271 returns NULL. */
272
273 static struct btrace_thread_info *
274 require_btrace (void)
275 {
276 struct thread_info *tp;
277
278 tp = require_btrace_thread ();
279
280 return &tp->btrace;
281 }
282
283 /* The new thread observer. */
284
285 static void
286 record_btrace_on_new_thread (struct thread_info *tp)
287 {
288 /* Ignore this thread if its inferior is not recorded by us. */
289 target_ops *rec = tp->inf->target_at (record_stratum);
290 if (rec != &record_btrace_ops)
291 return;
292
293 try
294 {
295 btrace_enable (tp, &record_btrace_conf);
296 }
297 catch (const gdb_exception_error &error)
298 {
299 warning ("%s", error.what ());
300 }
301 }
302
303 /* Enable automatic tracing of new threads. */
304
305 static void
306 record_btrace_auto_enable (void)
307 {
308 DEBUG ("attach thread observer");
309
310 gdb::observers::new_thread.attach (record_btrace_on_new_thread,
311 record_btrace_thread_observer_token,
312 "record-btrace");
313 }
314
315 /* Disable automatic tracing of new threads. */
316
317 static void
318 record_btrace_auto_disable (void)
319 {
320 DEBUG ("detach thread observer");
321
322 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
323 }
324
325 /* The record-btrace async event handler function. */
326
327 static void
328 record_btrace_handle_async_inferior_event (gdb_client_data data)
329 {
330 inferior_event_handler (INF_REG_EVENT);
331 }
332
333 /* See record-btrace.h. */
334
335 void
336 record_btrace_push_target (void)
337 {
338 const char *format;
339
340 record_btrace_auto_enable ();
341
342 current_inferior ()->push_target (&record_btrace_ops);
343
344 record_btrace_async_inferior_event_handler
345 = create_async_event_handler (record_btrace_handle_async_inferior_event,
346 NULL, "record-btrace");
347 record_btrace_generating_corefile = 0;
348
349 format = btrace_format_short_string (record_btrace_conf.format);
350 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
351 }
352
353 /* Disable btrace on a set of threads on scope exit. */
354
355 struct scoped_btrace_disable
356 {
357 scoped_btrace_disable () = default;
358
359 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
360
361 ~scoped_btrace_disable ()
362 {
363 for (thread_info *tp : m_threads)
364 btrace_disable (tp);
365 }
366
367 void add_thread (thread_info *thread)
368 {
369 m_threads.push_front (thread);
370 }
371
372 void discard ()
373 {
374 m_threads.clear ();
375 }
376
377 private:
378 std::forward_list<thread_info *> m_threads;
379 };
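/* Usage sketch (illustrative only; mirrors record_btrace_target_open
   below): enable btrace thread by thread, register each success for
   rollback, then commit once every thread succeeded:

     scoped_btrace_disable btrace_disable;
     btrace_enable (tp, &record_btrace_conf);
     btrace_disable.add_thread (tp);
     ...
     btrace_disable.discard ();  */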
380
381 /* Open target record-btrace. */
382
383 static void
384 record_btrace_target_open (const char *args, int from_tty)
385 {
386 /* If we fail to enable btrace for one thread, disable it for the threads for
387 which it was successfully enabled. */
388 scoped_btrace_disable btrace_disable;
389
390 DEBUG ("open");
391
392 record_preopen ();
393
394 if (!target_has_execution ())
395 error (_("The program is not being run."));
396
397 for (thread_info *tp : current_inferior ()->non_exited_threads ())
398 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
399 {
400 btrace_enable (tp, &record_btrace_conf);
401
402 btrace_disable.add_thread (tp);
403 }
404
405 record_btrace_push_target ();
406
407 btrace_disable.discard ();
408 }
409
410 /* The stop_recording method of target record-btrace. */
411
412 void
413 record_btrace_target::stop_recording ()
414 {
415 DEBUG ("stop recording");
416
417 record_btrace_auto_disable ();
418
419 for (thread_info *tp : current_inferior ()->non_exited_threads ())
420 if (tp->btrace.target != NULL)
421 btrace_disable (tp);
422 }
423
424 /* The disconnect method of target record-btrace. */
425
426 void
427 record_btrace_target::disconnect (const char *args,
428 int from_tty)
429 {
430 struct target_ops *beneath = this->beneath ();
431
432 /* Do not stop recording, just clean up GDB side. */
433 current_inferior ()->unpush_target (this);
434
435 /* Forward disconnect. */
436 beneath->disconnect (args, from_tty);
437 }
438
439 /* The close method of target record-btrace. */
440
441 void
442 record_btrace_target::close ()
443 {
444 if (record_btrace_async_inferior_event_handler != NULL)
445 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
446
447 /* Make sure automatic recording gets disabled even if we did not stop
448 recording before closing the record-btrace target. */
449 record_btrace_auto_disable ();
450
451 /* We should have already stopped recording.
452 Tear down btrace in case we have not. */
453 for (thread_info *tp : current_inferior ()->non_exited_threads ())
454 btrace_teardown (tp);
455 }
456
457 /* The async method of target record-btrace. */
458
459 void
460 record_btrace_target::async (bool enable)
461 {
462 if (enable)
463 mark_async_event_handler (record_btrace_async_inferior_event_handler);
464 else
465 clear_async_event_handler (record_btrace_async_inferior_event_handler);
466
467 this->beneath ()->async (enable);
468 }
469
470 /* Adjust *SIZE and return a human-readable size suffix. */
471
472 static const char *
473 record_btrace_adjust_size (unsigned int *size)
474 {
475 unsigned int sz;
476
477 sz = *size;
478
479 if ((sz & ((1u << 30) - 1)) == 0)
480 {
481 *size = sz >> 30;
482 return "GB";
483 }
484 else if ((sz & ((1u << 20) - 1)) == 0)
485 {
486 *size = sz >> 20;
487 return "MB";
488 }
489 else if ((sz & ((1u << 10) - 1)) == 0)
490 {
491 *size = sz >> 10;
492 return "kB";
493 }
494 else
495 return "";
496 }
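/* For example (illustrative only): a size of 4096 is a multiple of 1kB
   but not of 1MB, so *SIZE becomes 4 and "kB" is returned; a size of
   4000 matches no unit and is returned unchanged with suffix "".  */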
497
498 /* Print a BTS configuration. */
499
500 static void
501 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
502 {
503 const char *suffix;
504 unsigned int size;
505
506 size = conf->size;
507 if (size > 0)
508 {
509 suffix = record_btrace_adjust_size (&size);
510 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
511 }
512 }
513
514 /* Print an Intel Processor Trace configuration. */
515
516 static void
517 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
518 {
519 const char *suffix;
520 unsigned int size;
521
522 size = conf->size;
523 if (size > 0)
524 {
525 suffix = record_btrace_adjust_size (&size);
526 gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
527 }
528 }
529
530 /* Print a branch tracing configuration. */
531
532 static void
533 record_btrace_print_conf (const struct btrace_config *conf)
534 {
535 gdb_printf (_("Recording format: %s.\n"),
536 btrace_format_string (conf->format));
537
538 switch (conf->format)
539 {
540 case BTRACE_FORMAT_NONE:
541 return;
542
543 case BTRACE_FORMAT_BTS:
544 record_btrace_print_bts_conf (&conf->bts);
545 return;
546
547 case BTRACE_FORMAT_PT:
548 record_btrace_print_pt_conf (&conf->pt);
549 return;
550 }
551
552 internal_error (_("Unknown branch trace format."));
553 }
554
555 /* The info_record method of target record-btrace. */
556
557 void
558 record_btrace_target::info_record ()
559 {
560 struct btrace_thread_info *btinfo;
561 const struct btrace_config *conf;
562 struct thread_info *tp;
563 unsigned int insns, calls, gaps;
564
565 DEBUG ("info");
566
567 if (inferior_ptid == null_ptid)
568 error (_("No thread."));
569
570 tp = inferior_thread ();
571
572 validate_registers_access ();
573
574 btinfo = &tp->btrace;
575
576 conf = ::btrace_conf (btinfo);
577 if (conf != NULL)
578 record_btrace_print_conf (conf);
579
580 btrace_fetch (tp, record_btrace_get_cpu ());
581
582 insns = 0;
583 calls = 0;
584 gaps = 0;
585
586 if (!btrace_is_empty (tp))
587 {
588 struct btrace_call_iterator call;
589 struct btrace_insn_iterator insn;
590
591 btrace_call_end (&call, btinfo);
592 btrace_call_prev (&call, 1);
593 calls = btrace_call_number (&call);
594
595 btrace_insn_end (&insn, btinfo);
596 insns = btrace_insn_number (&insn);
597
598 /* If the last instruction is not a gap, it is the current instruction
599 that is not actually part of the record. */
600 if (btrace_insn_get (&insn) != NULL)
601 insns -= 1;
602
603 gaps = btinfo->ngaps;
604 }
605
606 gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
607 "for thread %s (%s).\n"), insns, calls, gaps,
608 print_thread_id (tp),
609 target_pid_to_str (tp->ptid).c_str ());
610
611 if (btrace_is_replaying (tp))
612 gdb_printf (_("Replay in progress. At instruction %u.\n"),
613 btrace_insn_number (btinfo->replay));
614 }
615
616 /* Print a decode error. */
617
618 static void
619 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
620 enum btrace_format format)
621 {
622 const char *errstr = btrace_decode_error (format, errcode);
623
624 uiout->text (_("["));
625 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
626 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
627 {
628 uiout->text (_("decode error ("));
629 uiout->field_signed ("errcode", errcode);
630 uiout->text (_("): "));
631 }
632 uiout->text (errstr);
633 uiout->text (_("]\n"));
634 }
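/* The resulting output is "[decode error (<errcode>): <errstr>]" in the
   general case, or just "[<errstr>]" for BTRACE_FORMAT_PT notifications
   (ERRCODE > 0).  */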
635
636 /* A range of source lines. */
637
638 struct btrace_line_range
639 {
640 /* The symtab this line is from. */
641 struct symtab *symtab;
642
643 /* The first line (inclusive). */
644 int begin;
645
646 /* The last line (exclusive). */
647 int end;
648 };
649
650 /* Construct a line range. */
651
652 static struct btrace_line_range
653 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
654 {
655 struct btrace_line_range range;
656
657 range.symtab = symtab;
658 range.begin = begin;
659 range.end = end;
660
661 return range;
662 }
663
664 /* Add a line to a line range. */
665
666 static struct btrace_line_range
667 btrace_line_range_add (struct btrace_line_range range, int line)
668 {
669 if (range.end <= range.begin)
670 {
671 /* This is the first entry. */
672 range.begin = line;
673 range.end = line + 1;
674 }
675 else if (line < range.begin)
676 range.begin = line;
677 else if (range.end <= line)
678 range.end = line + 1;
679
680 return range;
681 }
682
683 /* Return non-zero if RANGE is empty, zero otherwise. */
684
685 static int
686 btrace_line_range_is_empty (struct btrace_line_range range)
687 {
688 return range.end <= range.begin;
689 }
690
691 /* Return non-zero if LHS contains RHS, zero otherwise. */
692
693 static int
694 btrace_line_range_contains_range (struct btrace_line_range lhs,
695 struct btrace_line_range rhs)
696 {
697 return ((lhs.symtab == rhs.symtab)
698 && (lhs.begin <= rhs.begin)
699 && (rhs.end <= lhs.end));
700 }
701
702 /* Find the line range associated with PC. */
703
704 static struct btrace_line_range
705 btrace_find_line_range (CORE_ADDR pc)
706 {
707 struct btrace_line_range range;
708 const linetable_entry *lines;
709 const linetable *ltable;
710 struct symtab *symtab;
711 int nlines, i;
712
713 symtab = find_pc_line_symtab (pc);
714 if (symtab == NULL)
715 return btrace_mk_line_range (NULL, 0, 0);
716
717 ltable = symtab->linetable ();
718 if (ltable == NULL)
719 return btrace_mk_line_range (symtab, 0, 0);
720
721 nlines = ltable->nitems;
722 lines = ltable->item;
723 if (nlines <= 0)
724 return btrace_mk_line_range (symtab, 0, 0);
725
726 struct objfile *objfile = symtab->compunit ()->objfile ();
727 unrelocated_addr unrel_pc
728 = unrelocated_addr (pc - objfile->text_section_offset ());
729
730 range = btrace_mk_line_range (symtab, 0, 0);
731 for (i = 0; i < nlines - 1; i++)
732 {
733 /* The test of is_stmt here was added when the is_stmt field was
734 introduced to the 'struct linetable_entry' structure. This
735 ensured that this loop maintained the same behaviour as before we
736 introduced is_stmt. That said, it might be that we would be
737 better off not checking is_stmt here, this would lead to us
738 possibly adding more line numbers to the range. At the time this
739 change was made I was unsure how to test this so chose to go with
740 maintaining the existing behaviour. */
741 if (lines[i].raw_pc () == unrel_pc && lines[i].line != 0
742 && lines[i].is_stmt)
743 range = btrace_line_range_add (range, lines[i].line);
744 }
745
746 return range;
747 }
748
749 /* Print source lines in LINES to UIOUT.
750
751 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
752 instructions corresponding to that source line. When printing a new source
753 line, we do the cleanups for the open chain and open a new cleanup chain for
754 the new source line. If the source line range in LINES is not empty, this
755 function will leave the cleanup chain for the last printed source line open
756 so instructions can be added to it. */
757
758 static void
759 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
760 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
761 gdb::optional<ui_out_emit_list> *asm_list,
762 gdb_disassembly_flags flags)
763 {
764 print_source_lines_flags psl_flags;
765
766 if (flags & DISASSEMBLY_FILENAME)
767 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
768
769 for (int line = lines.begin; line < lines.end; ++line)
770 {
771 asm_list->reset ();
772
773 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
774
775 print_source_lines (lines.symtab, line, line + 1, psl_flags);
776
777 asm_list->emplace (uiout, "line_asm_insn");
778 }
779 }
780
781 /* Disassemble a section of the recorded instruction trace. */
782
783 static void
784 btrace_insn_history (struct ui_out *uiout,
785 const struct btrace_thread_info *btinfo,
786 const struct btrace_insn_iterator *begin,
787 const struct btrace_insn_iterator *end,
788 gdb_disassembly_flags flags)
789 {
790 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
791 btrace_insn_number (begin), btrace_insn_number (end));
792
793 flags |= DISASSEMBLY_SPECULATIVE;
794
795 struct gdbarch *gdbarch = target_gdbarch ();
796 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
797
798 ui_out_emit_list list_emitter (uiout, "asm_insns");
799
800 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
801 gdb::optional<ui_out_emit_list> asm_list;
802
803 gdb_pretty_print_disassembler disasm (gdbarch, uiout);
804
805 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
806 btrace_insn_next (&it, 1))
807 {
808 const struct btrace_insn *insn;
809
810 insn = btrace_insn_get (&it);
811
812 /* A NULL instruction indicates a gap in the trace. */
813 if (insn == NULL)
814 {
815 const struct btrace_config *conf;
816
817 conf = btrace_conf (btinfo);
818
819 /* We have trace so we must have a configuration. */
820 gdb_assert (conf != NULL);
821
822 uiout->field_fmt ("insn-number", "%u",
823 btrace_insn_number (&it));
824 uiout->text ("\t");
825
826 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
827 conf->format);
828 }
829 else
830 {
831 struct disasm_insn dinsn;
832
833 if ((flags & DISASSEMBLY_SOURCE) != 0)
834 {
835 struct btrace_line_range lines;
836
837 lines = btrace_find_line_range (insn->pc);
838 if (!btrace_line_range_is_empty (lines)
839 && !btrace_line_range_contains_range (last_lines, lines))
840 {
841 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
842 flags);
843 last_lines = lines;
844 }
845 else if (!src_and_asm_tuple.has_value ())
846 {
847 gdb_assert (!asm_list.has_value ());
848
849 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
850
851 /* No source information. */
852 asm_list.emplace (uiout, "line_asm_insn");
853 }
854
855 gdb_assert (src_and_asm_tuple.has_value ());
856 gdb_assert (asm_list.has_value ());
857 }
858
859 memset (&dinsn, 0, sizeof (dinsn));
860 dinsn.number = btrace_insn_number (&it);
861 dinsn.addr = insn->pc;
862
863 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
864 dinsn.is_speculative = 1;
865
866 disasm.pretty_print_insn (&dinsn, flags);
867 }
868 }
869 }
870
871 /* The insn_history method of target record-btrace. */
872
873 void
874 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
875 {
876 struct btrace_thread_info *btinfo;
877 struct btrace_insn_history *history;
878 struct btrace_insn_iterator begin, end;
879 struct ui_out *uiout;
880 unsigned int context, covered;
881
882 uiout = current_uiout;
883 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
884 context = abs (size);
885 if (context == 0)
886 error (_("Bad record instruction-history-size."));
887
888 btinfo = require_btrace ();
889 history = btinfo->insn_history;
890 if (history == NULL)
891 {
892 struct btrace_insn_iterator *replay;
893
894 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
895
896 /* If we're replaying, we start at the replay position. Otherwise, we
897 start at the tail of the trace. */
898 replay = btinfo->replay;
899 if (replay != NULL)
900 begin = *replay;
901 else
902 btrace_insn_end (&begin, btinfo);
903
904 /* We start from here and expand in the requested direction. Then we
905 expand in the other direction, as well, to fill up any remaining
906 context. */
907 end = begin;
908 if (size < 0)
909 {
910 /* We want the current position covered, as well. */
911 covered = btrace_insn_next (&end, 1);
912 covered += btrace_insn_prev (&begin, context - covered);
913 covered += btrace_insn_next (&end, context - covered);
914 }
915 else
916 {
917 covered = btrace_insn_next (&end, context);
918 covered += btrace_insn_prev (&begin, context - covered);
919 }
920 }
921 else
922 {
923 begin = history->begin;
924 end = history->end;
925
926 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
927 btrace_insn_number (&begin), btrace_insn_number (&end));
928
929 if (size < 0)
930 {
931 end = begin;
932 covered = btrace_insn_prev (&begin, context);
933 }
934 else
935 {
936 begin = end;
937 covered = btrace_insn_next (&end, context);
938 }
939 }
940
941 if (covered > 0)
942 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
943 else
944 {
945 if (size < 0)
946 gdb_printf (_("At the start of the branch trace record.\n"));
947 else
948 gdb_printf (_("At the end of the branch trace record.\n"));
949 }
950
951 btrace_set_insn_history (btinfo, &begin, &end);
952 }
953
954 /* The insn_history_range method of target record-btrace. */
955
956 void
957 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
958 gdb_disassembly_flags flags)
959 {
960 struct btrace_thread_info *btinfo;
961 struct btrace_insn_iterator begin, end;
962 struct ui_out *uiout;
963 unsigned int low, high;
964 int found;
965
966 uiout = current_uiout;
967 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
968 low = from;
969 high = to;
970
971 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
972
973 /* Check for wrap-arounds. */
974 if (low != from || high != to)
975 error (_("Bad range."));
976
977 if (high < low)
978 error (_("Bad range."));
979
980 btinfo = require_btrace ();
981
982 found = btrace_find_insn_by_number (&begin, btinfo, low);
983 if (found == 0)
984 error (_("Range out of bounds."));
985
986 found = btrace_find_insn_by_number (&end, btinfo, high);
987 if (found == 0)
988 {
989 /* Silently truncate the range. */
990 btrace_insn_end (&end, btinfo);
991 }
992 else
993 {
994 /* We want both begin and end to be inclusive. */
995 btrace_insn_next (&end, 1);
996 }
997
998 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
999 btrace_set_insn_history (btinfo, &begin, &end);
1000 }
1001
1002 /* The insn_history_from method of target record-btrace. */
1003
1004 void
1005 record_btrace_target::insn_history_from (ULONGEST from, int size,
1006 gdb_disassembly_flags flags)
1007 {
1008 ULONGEST begin, end, context;
1009
1010 context = abs (size);
1011 if (context == 0)
1012 error (_("Bad record instruction-history-size."));
1013
1014 if (size < 0)
1015 {
1016 end = from;
1017
1018 if (from < context)
1019 begin = 0;
1020 else
1021 begin = from - context + 1;
1022 }
1023 else
1024 {
1025 begin = from;
1026 end = from + context - 1;
1027
1028 /* Check for wrap-around. */
1029 if (end < begin)
1030 end = ULONGEST_MAX;
1031 }
1032
1033 insn_history_range (begin, end, flags);
1034 }
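/* For example (illustrative only): FROM = 100 with SIZE = -5 requests the
   inclusive range [96; 100]; with SIZE = +5 it requests [100; 104],
   clamping END to ULONGEST_MAX on wrap-around.  */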
1035
1036 /* Print the instruction number range for a function call history line. */
1037
1038 static void
1039 btrace_call_history_insn_range (struct ui_out *uiout,
1040 const struct btrace_function *bfun)
1041 {
1042 unsigned int begin, end, size;
1043
1044 size = bfun->insn.size ();
1045 gdb_assert (size > 0);
1046
1047 begin = bfun->insn_offset;
1048 end = begin + size - 1;
1049
1050 uiout->field_unsigned ("insn begin", begin);
1051 uiout->text (",");
1052 uiout->field_unsigned ("insn end", end);
1053 }
1054
1055 /* Compute the lowest and highest source line for the instructions in BFUN
1056 and return them in PBEGIN and PEND.
1057 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1058 result from inlining or macro expansion. */
1059
1060 static void
1061 btrace_compute_src_line_range (const struct btrace_function *bfun,
1062 int *pbegin, int *pend)
1063 {
1064 struct symtab *symtab;
1065 struct symbol *sym;
1066 int begin, end;
1067
1068 begin = INT_MAX;
1069 end = INT_MIN;
1070
1071 sym = bfun->sym;
1072 if (sym == NULL)
1073 goto out;
1074
1075 symtab = sym->symtab ();
1076
1077 for (const btrace_insn &insn : bfun->insn)
1078 {
1079 struct symtab_and_line sal;
1080
1081 sal = find_pc_line (insn.pc, 0);
1082 if (sal.symtab != symtab || sal.line == 0)
1083 continue;
1084
1085 begin = std::min (begin, sal.line);
1086 end = std::max (end, sal.line);
1087 }
1088
1089 out:
1090 *pbegin = begin;
1091 *pend = end;
1092 }
1093
1094 /* Print the source line information for a function call history line. */
1095
1096 static void
1097 btrace_call_history_src_line (struct ui_out *uiout,
1098 const struct btrace_function *bfun)
1099 {
1100 struct symbol *sym;
1101 int begin, end;
1102
1103 sym = bfun->sym;
1104 if (sym == NULL)
1105 return;
1106
1107 uiout->field_string ("file",
1108 symtab_to_filename_for_display (sym->symtab ()),
1109 file_name_style.style ());
1110
1111 btrace_compute_src_line_range (bfun, &begin, &end);
1112 if (end < begin)
1113 return;
1114
1115 uiout->text (":");
1116 uiout->field_signed ("min line", begin);
1117
1118 if (end == begin)
1119 return;
1120
1121 uiout->text (",");
1122 uiout->field_signed ("max line", end);
1123 }
1124
1125 /* Get the name of a branch trace function. */
1126
1127 static const char *
1128 btrace_get_bfun_name (const struct btrace_function *bfun)
1129 {
1130 struct minimal_symbol *msym;
1131 struct symbol *sym;
1132
1133 if (bfun == NULL)
1134 return "??";
1135
1136 msym = bfun->msym;
1137 sym = bfun->sym;
1138
1139 if (sym != NULL)
1140 return sym->print_name ();
1141 else if (msym != NULL)
1142 return msym->print_name ();
1143 else
1144 return "??";
1145 }
1146
1147 /* Disassemble a section of the recorded function trace. */
1148
1149 static void
1150 btrace_call_history (struct ui_out *uiout,
1151 const struct btrace_thread_info *btinfo,
1152 const struct btrace_call_iterator *begin,
1153 const struct btrace_call_iterator *end,
1154 int int_flags)
1155 {
1156 struct btrace_call_iterator it;
1157 record_print_flags flags = (enum record_print_flag) int_flags;
1158
1159 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1160 btrace_call_number (end));
1161
1162 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1163 {
1164 const struct btrace_function *bfun;
1165 struct minimal_symbol *msym;
1166 struct symbol *sym;
1167
1168 bfun = btrace_call_get (&it);
1169 sym = bfun->sym;
1170 msym = bfun->msym;
1171
1172 /* Print the function index. */
1173 uiout->field_unsigned ("index", bfun->number);
1174 uiout->text ("\t");
1175
1176 /* Indicate gaps in the trace. */
1177 if (bfun->errcode != 0)
1178 {
1179 const struct btrace_config *conf;
1180
1181 conf = btrace_conf (btinfo);
1182
1183 /* We have trace so we must have a configuration. */
1184 gdb_assert (conf != NULL);
1185
1186 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1187
1188 continue;
1189 }
1190
1191 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1192 {
1193 int level = bfun->level + btinfo->level, i;
1194
1195 for (i = 0; i < level; ++i)
1196 uiout->text (" ");
1197 }
1198
1199 if (sym != NULL)
1200 uiout->field_string ("function", sym->print_name (),
1201 function_name_style.style ());
1202 else if (msym != NULL)
1203 uiout->field_string ("function", msym->print_name (),
1204 function_name_style.style ());
1205 else if (!uiout->is_mi_like_p ())
1206 uiout->field_string ("function", "??",
1207 function_name_style.style ());
1208
1209 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1210 {
1211 uiout->text (_("\tinst "));
1212 btrace_call_history_insn_range (uiout, bfun);
1213 }
1214
1215 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1216 {
1217 uiout->text (_("\tat "));
1218 btrace_call_history_src_line (uiout, bfun);
1219 }
1220
1221 uiout->text ("\n");
1222 }
1223 }
1224
1225 /* The call_history method of target record-btrace. */
1226
1227 void
1228 record_btrace_target::call_history (int size, record_print_flags flags)
1229 {
1230 struct btrace_thread_info *btinfo;
1231 struct btrace_call_history *history;
1232 struct btrace_call_iterator begin, end;
1233 struct ui_out *uiout;
1234 unsigned int context, covered;
1235
1236 uiout = current_uiout;
1237 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1238 context = abs (size);
1239 if (context == 0)
1240 error (_("Bad record function-call-history-size."));
1241
1242 btinfo = require_btrace ();
1243 history = btinfo->call_history;
1244 if (history == NULL)
1245 {
1246 struct btrace_insn_iterator *replay;
1247
1248 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1249
1250 /* If we're replaying, we start at the replay position. Otherwise, we
1251 start at the tail of the trace. */
1252 replay = btinfo->replay;
1253 if (replay != NULL)
1254 {
1255 begin.btinfo = btinfo;
1256 begin.index = replay->call_index;
1257 }
1258 else
1259 btrace_call_end (&begin, btinfo);
1260
1261 /* We start from here and expand in the requested direction. Then we
1262 expand in the other direction, as well, to fill up any remaining
1263 context. */
1264 end = begin;
1265 if (size < 0)
1266 {
1267 /* We want the current position covered, as well. */
1268 covered = btrace_call_next (&end, 1);
1269 covered += btrace_call_prev (&begin, context - covered);
1270 covered += btrace_call_next (&end, context - covered);
1271 }
1272 else
1273 {
1274 covered = btrace_call_next (&end, context);
1275 covered += btrace_call_prev (&begin, context - covered);
1276 }
1277 }
1278 else
1279 {
1280 begin = history->begin;
1281 end = history->end;
1282
1283 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1284 btrace_call_number (&begin), btrace_call_number (&end));
1285
1286 if (size < 0)
1287 {
1288 end = begin;
1289 covered = btrace_call_prev (&begin, context);
1290 }
1291 else
1292 {
1293 begin = end;
1294 covered = btrace_call_next (&end, context);
1295 }
1296 }
1297
1298 if (covered > 0)
1299 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1300 else
1301 {
1302 if (size < 0)
1303 gdb_printf (_("At the start of the branch trace record.\n"));
1304 else
1305 gdb_printf (_("At the end of the branch trace record.\n"));
1306 }
1307
1308 btrace_set_call_history (btinfo, &begin, &end);
1309 }
1310
1311 /* The call_history_range method of target record-btrace. */
1312
1313 void
1314 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1315 record_print_flags flags)
1316 {
1317 struct btrace_thread_info *btinfo;
1318 struct btrace_call_iterator begin, end;
1319 struct ui_out *uiout;
1320 unsigned int low, high;
1321 int found;
1322
1323 uiout = current_uiout;
1324 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1325 low = from;
1326 high = to;
1327
1328 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1329
1330 /* Check for wrap-arounds. */
1331 if (low != from || high != to)
1332 error (_("Bad range."));
1333
1334 if (high < low)
1335 error (_("Bad range."));
1336
1337 btinfo = require_btrace ();
1338
1339 found = btrace_find_call_by_number (&begin, btinfo, low);
1340 if (found == 0)
1341 error (_("Range out of bounds."));
1342
1343 found = btrace_find_call_by_number (&end, btinfo, high);
1344 if (found == 0)
1345 {
1346 /* Silently truncate the range. */
1347 btrace_call_end (&end, btinfo);
1348 }
1349 else
1350 {
1351 /* We want both begin and end to be inclusive. */
1352 btrace_call_next (&end, 1);
1353 }
1354
1355 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1356 btrace_set_call_history (btinfo, &begin, &end);
1357 }
1358
1359 /* The call_history_from method of target record-btrace. */
1360
1361 void
1362 record_btrace_target::call_history_from (ULONGEST from, int size,
1363 record_print_flags flags)
1364 {
1365 ULONGEST begin, end, context;
1366
1367 context = abs (size);
1368 if (context == 0)
1369 error (_("Bad record function-call-history-size."));
1370
1371 if (size < 0)
1372 {
1373 end = from;
1374
1375 if (from < context)
1376 begin = 0;
1377 else
1378 begin = from - context + 1;
1379 }
1380 else
1381 {
1382 begin = from;
1383 end = from + context - 1;
1384
1385 /* Check for wrap-around. */
1386 if (end < begin)
1387 end = ULONGEST_MAX;
1388 }
1389
1390 call_history_range (begin, end, flags);
1391 }
1392
1393 /* The record_method method of target record-btrace. */
1394
1395 enum record_method
1396 record_btrace_target::record_method (ptid_t ptid)
1397 {
1398 process_stratum_target *proc_target = current_inferior ()->process_target ();
1399 thread_info *const tp = proc_target->find_thread (ptid);
1400
1401 if (tp == NULL)
1402 error (_("No thread."));
1403
1404 if (tp->btrace.target == NULL)
1405 return RECORD_METHOD_NONE;
1406
1407 return RECORD_METHOD_BTRACE;
1408 }
1409
1410 /* The record_is_replaying method of target record-btrace. */
1411
1412 bool
1413 record_btrace_target::record_is_replaying (ptid_t ptid)
1414 {
1415 process_stratum_target *proc_target = current_inferior ()->process_target ();
1416 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
1417 if (btrace_is_replaying (tp))
1418 return true;
1419
1420 return false;
1421 }
1422
1423 /* The record_will_replay method of target record-btrace. */
1424
1425 bool
1426 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1427 {
1428 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1429 }
1430
1431 /* The xfer_partial method of target record-btrace. */
1432
1433 enum target_xfer_status
1434 record_btrace_target::xfer_partial (enum target_object object,
1435 const char *annex, gdb_byte *readbuf,
1436 const gdb_byte *writebuf, ULONGEST offset,
1437 ULONGEST len, ULONGEST *xfered_len)
1438 {
1439 /* Filter out requests that don't make sense during replay. */
1440 if (replay_memory_access == replay_memory_access_read_only
1441 && !record_btrace_generating_corefile
1442 && record_is_replaying (inferior_ptid))
1443 {
1444 switch (object)
1445 {
1446 case TARGET_OBJECT_MEMORY:
1447 {
1448 const struct target_section *section;
1449
1450 /* We do not allow writing memory in general. */
1451 if (writebuf != NULL)
1452 {
1453 *xfered_len = len;
1454 return TARGET_XFER_UNAVAILABLE;
1455 }
1456
1457 /* We allow reading readonly memory. */
1458 section = target_section_by_addr (this, offset);
1459 if (section != NULL)
1460 {
1461 /* Check if the section we found is readonly. */
1462 if ((bfd_section_flags (section->the_bfd_section)
1463 & SEC_READONLY) != 0)
1464 {
1465 /* Truncate the request to fit into this section. */
1466 len = std::min (len, section->endaddr - offset);
1467 break;
1468 }
1469 }
1470
1471 *xfered_len = len;
1472 return TARGET_XFER_UNAVAILABLE;
1473 }
1474 }
1475 }
1476
1477 /* Forward the request. */
1478 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1479 offset, len, xfered_len);
1480 }
1481
1482 /* The insert_breakpoint method of target record-btrace. */
1483
1484 int
1485 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1486 struct bp_target_info *bp_tgt)
1487 {
1488 const char *old;
1489 int ret;
1490
1491 /* Inserting breakpoints requires accessing memory. Allow it for the
1492 duration of this function. */
1493 old = replay_memory_access;
1494 replay_memory_access = replay_memory_access_read_write;
1495
1496 ret = 0;
1497 try
1498 {
1499 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1500 }
1501 catch (const gdb_exception &except)
1502 {
1503 replay_memory_access = old;
1504 throw;
1505 }
1506 replay_memory_access = old;
1507
1508 return ret;
1509 }
1510
1511 /* The remove_breakpoint method of target record-btrace. */
1512
1513 int
1514 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1515 struct bp_target_info *bp_tgt,
1516 enum remove_bp_reason reason)
1517 {
1518 const char *old;
1519 int ret;
1520
1521 /* Removing breakpoints requires accessing memory. Allow it for the
1522 duration of this function. */
1523 old = replay_memory_access;
1524 replay_memory_access = replay_memory_access_read_write;
1525
1526 ret = 0;
1527 try
1528 {
1529 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1530 }
1531 catch (const gdb_exception &except)
1532 {
1533 replay_memory_access = old;
1534 throw;
1535 }
1536 replay_memory_access = old;
1537
1538 return ret;
1539 }
1540
1541 /* The fetch_registers method of target record-btrace. */
1542
1543 void
1544 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1545 {
1546 btrace_insn_iterator *replay = nullptr;
1547
1548 /* Thread-db may ask for a thread's registers before GDB knows about the
1549 thread. We forward the request to the target beneath in this
1550 case. */
1551 thread_info *tp = regcache->target ()->find_thread (regcache->ptid ());
1552 if (tp != nullptr)
1553 replay = tp->btrace.replay;
1554
1555 if (replay != nullptr && !record_btrace_generating_corefile)
1556 {
1557 const struct btrace_insn *insn;
1558 struct gdbarch *gdbarch;
1559 int pcreg;
1560
1561 gdbarch = regcache->arch ();
1562 pcreg = gdbarch_pc_regnum (gdbarch);
1563 if (pcreg < 0)
1564 return;
1565
1566 /* We can only provide the PC register. */
1567 if (regno >= 0 && regno != pcreg)
1568 return;
1569
1570 insn = btrace_insn_get (replay);
1571 gdb_assert (insn != NULL);
1572
1573 regcache->raw_supply (regno, &insn->pc);
1574 }
1575 else
1576 this->beneath ()->fetch_registers (regcache, regno);
1577 }
1578
1579 /* The store_registers method of target record-btrace. */
1580
1581 void
1582 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1583 {
1584 if (!record_btrace_generating_corefile
1585 && record_is_replaying (regcache->ptid ()))
1586 error (_("Cannot write registers while replaying."));
1587
1588 gdb_assert (may_write_registers);
1589
1590 this->beneath ()->store_registers (regcache, regno);
1591 }
1592
1593 /* The prepare_to_store method of target record-btrace. */
1594
1595 void
1596 record_btrace_target::prepare_to_store (struct regcache *regcache)
1597 {
1598 if (!record_btrace_generating_corefile
1599 && record_is_replaying (regcache->ptid ()))
1600 return;
1601
1602 this->beneath ()->prepare_to_store (regcache);
1603 }
1604
1605 /* The branch trace frame cache. */
1606
1607 struct btrace_frame_cache
1608 {
1609 /* The thread. */
1610 struct thread_info *tp;
1611
1612 /* The frame info. */
1613 frame_info *frame;
1614
1615 /* The branch trace function segment. */
1616 const struct btrace_function *bfun;
1617 };
1618
1619 /* A struct btrace_frame_cache hash table indexed by FRAME. */
1620
1621 static htab_t bfcache;
1622
1623 /* hash_f for htab_create_alloc of bfcache. */
1624
1625 static hashval_t
1626 bfcache_hash (const void *arg)
1627 {
1628 const struct btrace_frame_cache *cache
1629 = (const struct btrace_frame_cache *) arg;
1630
1631 return htab_hash_pointer (cache->frame);
1632 }
1633
1634 /* eq_f for htab_create_alloc of bfcache. */
1635
1636 static int
1637 bfcache_eq (const void *arg1, const void *arg2)
1638 {
1639 const struct btrace_frame_cache *cache1
1640 = (const struct btrace_frame_cache *) arg1;
1641 const struct btrace_frame_cache *cache2
1642 = (const struct btrace_frame_cache *) arg2;
1643
1644 return cache1->frame == cache2->frame;
1645 }
1646
1647 /* Create a new btrace frame cache. */
1648
1649 static struct btrace_frame_cache *
1650 bfcache_new (frame_info_ptr frame)
1651 {
1652 struct btrace_frame_cache *cache;
1653 void **slot;
1654
1655 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1656 cache->frame = frame.get ();
1657
1658 slot = htab_find_slot (bfcache, cache, INSERT);
1659 gdb_assert (*slot == NULL);
1660 *slot = cache;
1661
1662 return cache;
1663 }
1664
1665 /* Extract the branch trace function from a branch trace frame. */
1666
1667 static const struct btrace_function *
1668 btrace_get_frame_function (frame_info_ptr frame)
1669 {
1670 const struct btrace_frame_cache *cache;
1671 struct btrace_frame_cache pattern;
1672 void **slot;
1673
1674 pattern.frame = frame.get ();
1675
1676 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1677 if (slot == NULL)
1678 return NULL;
1679
1680 cache = (const struct btrace_frame_cache *) *slot;
1681 return cache->bfun;
1682 }
1683
1684 /* Implement stop_reason method for record_btrace_frame_unwind. */
1685
1686 static enum unwind_stop_reason
1687 record_btrace_frame_unwind_stop_reason (frame_info_ptr this_frame,
1688 void **this_cache)
1689 {
1690 const struct btrace_frame_cache *cache;
1691 const struct btrace_function *bfun;
1692
1693 cache = (const struct btrace_frame_cache *) *this_cache;
1694 bfun = cache->bfun;
1695 gdb_assert (bfun != NULL);
1696
1697 if (bfun->up == 0)
1698 return UNWIND_UNAVAILABLE;
1699
1700 return UNWIND_NO_REASON;
1701 }
1702
1703 /* Implement this_id method for record_btrace_frame_unwind. */
1704
1705 static void
1706 record_btrace_frame_this_id (frame_info_ptr this_frame, void **this_cache,
1707 struct frame_id *this_id)
1708 {
1709 const struct btrace_frame_cache *cache;
1710 const struct btrace_function *bfun;
1711 struct btrace_call_iterator it;
1712 CORE_ADDR code, special;
1713
1714 cache = (const struct btrace_frame_cache *) *this_cache;
1715
1716 bfun = cache->bfun;
1717 gdb_assert (bfun != NULL);
1718
1719 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1720 bfun = btrace_call_get (&it);
1721
1722 code = get_frame_func (this_frame);
1723 special = bfun->number;
1724
1725 *this_id = frame_id_build_unavailable_stack_special (code, special);
1726
1727 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1728 btrace_get_bfun_name (cache->bfun),
1729 core_addr_to_string_nz (this_id->code_addr),
1730 core_addr_to_string_nz (this_id->special_addr));
1731 }
1732
1733 /* Implement prev_register method for record_btrace_frame_unwind. */
1734
1735 static struct value *
1736 record_btrace_frame_prev_register (frame_info_ptr this_frame,
1737 void **this_cache,
1738 int regnum)
1739 {
1740 const struct btrace_frame_cache *cache;
1741 const struct btrace_function *bfun, *caller;
1742 struct btrace_call_iterator it;
1743 struct gdbarch *gdbarch;
1744 CORE_ADDR pc;
1745 int pcreg;
1746
1747 gdbarch = get_frame_arch (this_frame);
1748 pcreg = gdbarch_pc_regnum (gdbarch);
1749 if (pcreg < 0 || regnum != pcreg)
1750 throw_error (NOT_AVAILABLE_ERROR,
1751 _("Registers are not available in btrace record history"));
1752
1753 cache = (const struct btrace_frame_cache *) *this_cache;
1754 bfun = cache->bfun;
1755 gdb_assert (bfun != NULL);
1756
1757 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1758 throw_error (NOT_AVAILABLE_ERROR,
1759 _("No caller in btrace record history"));
1760
1761 caller = btrace_call_get (&it);
1762
1763 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1764 pc = caller->insn.front ().pc;
1765 else
1766 {
1767 pc = caller->insn.back ().pc;
1768 pc += gdb_insn_length (gdbarch, pc);
1769 }
1770
1771 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1772 btrace_get_bfun_name (bfun), bfun->level,
1773 core_addr_to_string_nz (pc));
1774
1775 return frame_unwind_got_address (this_frame, regnum, pc);
1776 }
1777
1778 /* Implement sniffer method for record_btrace_frame_unwind. */
1779
1780 static int
1781 record_btrace_frame_sniffer (const struct frame_unwind *self,
1782 frame_info_ptr this_frame,
1783 void **this_cache)
1784 {
1785 const struct btrace_function *bfun;
1786 struct btrace_frame_cache *cache;
1787 struct thread_info *tp;
1788 frame_info_ptr next;
1789
1790 /* THIS_FRAME does not contain a reference to its thread. */
1791 tp = inferior_thread ();
1792
1793 bfun = NULL;
1794 next = get_next_frame (this_frame);
1795 if (next == NULL)
1796 {
1797 const struct btrace_insn_iterator *replay;
1798
1799 replay = tp->btrace.replay;
1800 if (replay != NULL)
1801 bfun = &replay->btinfo->functions[replay->call_index];
1802 }
1803 else
1804 {
1805 const struct btrace_function *callee;
1806 struct btrace_call_iterator it;
1807
1808 callee = btrace_get_frame_function (next);
1809 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1810 return 0;
1811
1812 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1813 return 0;
1814
1815 bfun = btrace_call_get (&it);
1816 }
1817
1818 if (bfun == NULL)
1819 return 0;
1820
1821 DEBUG ("[frame] sniffed frame for %s on level %d",
1822 btrace_get_bfun_name (bfun), bfun->level);
1823
1824 /* This is our frame. Initialize the frame cache. */
1825 cache = bfcache_new (this_frame);
1826 cache->tp = tp;
1827 cache->bfun = bfun;
1828
1829 *this_cache = cache;
1830 return 1;
1831 }
1832
1833 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1834
1835 static int
1836 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1837 frame_info_ptr this_frame,
1838 void **this_cache)
1839 {
1840 const struct btrace_function *bfun, *callee;
1841 struct btrace_frame_cache *cache;
1842 struct btrace_call_iterator it;
1843 frame_info_ptr next;
1844 struct thread_info *tinfo;
1845
1846 next = get_next_frame (this_frame);
1847 if (next == NULL)
1848 return 0;
1849
1850 callee = btrace_get_frame_function (next);
1851 if (callee == NULL)
1852 return 0;
1853
1854 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1855 return 0;
1856
1857 tinfo = inferior_thread ();
1858 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1859 return 0;
1860
1861 bfun = btrace_call_get (&it);
1862
1863 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1864 btrace_get_bfun_name (bfun), bfun->level);
1865
1866 /* This is our frame. Initialize the frame cache. */
1867 cache = bfcache_new (this_frame);
1868 cache->tp = tinfo;
1869 cache->bfun = bfun;
1870
1871 *this_cache = cache;
1872 return 1;
1873 }
1874
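/* Implement the dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind: remove the frame's entry from the
   btrace frame cache.  */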
1875 static void
1876 record_btrace_frame_dealloc_cache (frame_info *self, void *this_cache)
1877 {
1878 struct btrace_frame_cache *cache;
1879 void **slot;
1880
1881 cache = (struct btrace_frame_cache *) this_cache;
1882
1883 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1884 gdb_assert (slot != NULL);
1885
1886 htab_remove_elt (bfcache, cache);
1887 }
1888
1889 /* btrace recording does not store previous memory content, nor the stack
1890 frames' content. Any unwinding would return erroneous results as the stack
1891 contents no longer match the changed PC value restored from history.
1892 Therefore this unwinder reports any possibly unwound registers as
1893 <unavailable>. */
1894
1895 const struct frame_unwind record_btrace_frame_unwind =
1896 {
1897 "record-btrace",
1898 NORMAL_FRAME,
1899 record_btrace_frame_unwind_stop_reason,
1900 record_btrace_frame_this_id,
1901 record_btrace_frame_prev_register,
1902 NULL,
1903 record_btrace_frame_sniffer,
1904 record_btrace_frame_dealloc_cache
1905 };
1906
1907 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1908 {
1909 "record-btrace tailcall",
1910 TAILCALL_FRAME,
1911 record_btrace_frame_unwind_stop_reason,
1912 record_btrace_frame_this_id,
1913 record_btrace_frame_prev_register,
1914 NULL,
1915 record_btrace_tailcall_frame_sniffer,
1916 record_btrace_frame_dealloc_cache
1917 };
1918
1919 /* Implement the get_unwinder method. */
1920
1921 const struct frame_unwind *
1922 record_btrace_target::get_unwinder ()
1923 {
1924 return &record_btrace_frame_unwind;
1925 }
1926
1927 /* Implement the get_tailcall_unwinder method. */
1928
1929 const struct frame_unwind *
1930 record_btrace_target::get_tailcall_unwinder ()
1931 {
1932 return &record_btrace_tailcall_frame_unwind;
1933 }
1934
1935 /* Return a human-readable string for FLAG. */
1936
1937 static const char *
1938 btrace_thread_flag_to_str (btrace_thread_flags flag)
1939 {
1940 switch (flag)
1941 {
1942 case BTHR_STEP:
1943 return "step";
1944
1945 case BTHR_RSTEP:
1946 return "reverse-step";
1947
1948 case BTHR_CONT:
1949 return "cont";
1950
1951 case BTHR_RCONT:
1952 return "reverse-cont";
1953
1954 case BTHR_STOP:
1955 return "stop";
1956 }
1957
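/* FLAG is expected to be a single bit; combined masks such as BTHR_MOVE
   fall through to here.  */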
1958 return "<invalid>";
1959 }
1960
1961 /* Indicate that TP should be resumed according to FLAG. */
1962
1963 static void
1964 record_btrace_resume_thread (struct thread_info *tp,
1965 enum btrace_thread_flag flag)
1966 {
1967 struct btrace_thread_info *btinfo;
1968
1969 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1970 tp->ptid.to_string ().c_str (), flag,
1971 btrace_thread_flag_to_str (flag));
1972
1973 btinfo = &tp->btrace;
1974
1975 /* Fetch the latest branch trace. */
1976 btrace_fetch (tp, record_btrace_get_cpu ());
1977
1978 /* A resume request overwrites a preceding resume or stop request. */
1979 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1980 btinfo->flags |= flag;
1981 }
1982
1983 /* Get the current frame for TP. */
1984
1985 static struct frame_id
1986 get_thread_current_frame_id (struct thread_info *tp)
1987 {
1988 /* Set current thread, which is implicitly used by
1989 get_current_frame. */
1990 scoped_restore_current_thread restore_thread;
1991
1992 switch_to_thread (tp);
1993
1994 process_stratum_target *proc_target = tp->inf->process_target ();
1995
1996 /* Clear the executing flag to allow changes to the current frame.
1997 We are not actually running, yet. We just started a reverse execution
1998 command or a record goto command.
1999 For the latter, EXECUTING is false and this has no effect.
2000 For the former, EXECUTING is true and we're in wait, about to
2001 move the thread. Since we need to recompute the stack, we temporarily
2002 set EXECUTING to false. */
2003 bool executing = tp->executing ();
2004 set_executing (proc_target, inferior_ptid, false);
2005 SCOPE_EXIT
2006 {
2007 set_executing (proc_target, inferior_ptid, executing);
2008 };
2009 return get_frame_id (get_current_frame ());
2010 }
2011
2012 /* Start replaying a thread. */
2013
2014 static struct btrace_insn_iterator *
2015 record_btrace_start_replaying (struct thread_info *tp)
2016 {
2017 struct btrace_insn_iterator *replay;
2018 struct btrace_thread_info *btinfo;
2019
2020 btinfo = &tp->btrace;
2021 replay = NULL;
2022
2023 /* We can't start replaying without trace. */
2024 if (btinfo->functions.empty ())
2025 error (_("No trace."));
2026
2027 /* GDB stores the current frame_id when stepping in order to detect steps
2028 into subroutines.
2029 Since frames are computed differently when we're replaying, we need to
2030 recompute those stored frames and fix them up so we can still detect
2031 subroutines after we started replaying. */
2032 try
2033 {
2034 struct frame_id frame_id;
2035 int upd_step_frame_id, upd_step_stack_frame_id;
2036
2037 /* The current frame without replaying - computed via normal unwind. */
2038 frame_id = get_thread_current_frame_id (tp);
2039
2040 /* Check if we need to update any stepping-related frame id's. */
2041 upd_step_frame_id = (frame_id == tp->control.step_frame_id);
2042 upd_step_stack_frame_id = (frame_id == tp->control.step_stack_frame_id);
2043
2044 /* We start replaying at the end of the branch trace. This corresponds
2045 to the current instruction. */
2046 replay = XNEW (struct btrace_insn_iterator);
2047 btrace_insn_end (replay, btinfo);
2048
2049 /* Skip gaps at the end of the trace. */
2050 while (btrace_insn_get (replay) == NULL)
2051 {
2052 unsigned int steps;
2053
2054 steps = btrace_insn_prev (replay, 1);
2055 if (steps == 0)
2056 error (_("No trace."));
2057 }
2058
2059 /* We're not replaying, yet. */
2060 gdb_assert (btinfo->replay == NULL);
2061 btinfo->replay = replay;
2062
2063 /* Make sure we're not using any stale registers. */
2064 registers_changed_thread (tp);
2065
2066 /* The current frame with replaying - computed via btrace unwind. */
2067 frame_id = get_thread_current_frame_id (tp);
2068
2069 /* Replace stepping related frames where necessary. */
2070 if (upd_step_frame_id)
2071 tp->control.step_frame_id = frame_id;
2072 if (upd_step_stack_frame_id)
2073 tp->control.step_stack_frame_id = frame_id;
2074 }
2075 catch (const gdb_exception &except)
2076 {
2077 xfree (btinfo->replay);
2078 btinfo->replay = NULL;
2079
2080 registers_changed_thread (tp);
2081
2082 throw;
2083 }
2084
2085 return replay;
2086 }
2087
2088 /* Stop replaying a thread. */
2089
2090 static void
2091 record_btrace_stop_replaying (struct thread_info *tp)
2092 {
2093 struct btrace_thread_info *btinfo;
2094
2095 btinfo = &tp->btrace;
2096
2097 xfree (btinfo->replay);
2098 btinfo->replay = NULL;
2099
2100 /* Make sure we're not leaving any stale registers. */
2101 registers_changed_thread (tp);
2102 }
2103
2104 /* Stop replaying TP if it is at the end of its execution history. */
2105
2106 static void
2107 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2108 {
2109 struct btrace_insn_iterator *replay, end;
2110 struct btrace_thread_info *btinfo;
2111
2112 btinfo = &tp->btrace;
2113 replay = btinfo->replay;
2114
2115 if (replay == NULL)
2116 return;
2117
2118 btrace_insn_end (&end, btinfo);
2119
2120 if (btrace_insn_cmp (replay, &end) == 0)
2121 record_btrace_stop_replaying (tp);
2122 }
2123
2124 /* The resume method of target record-btrace. */
2125
2126 void
2127 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2128 {
2129 enum btrace_thread_flag flag, cflag;
2130
2131 DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (),
2132 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2133 step ? "step" : "cont");
2134
2135 /* Store the execution direction of the last resume.
2136
2137 If there is more than one resume call, we have to rely on infrun
2138 to not change the execution direction in-between. */
2139 record_btrace_resume_exec_dir = ::execution_direction;
2140
2141 /* As long as we're not replaying, just forward the request.
2142
2143 For non-stop targets this means that no thread is replaying. In order to
2144 make progress, we may need to explicitly move replaying threads to the end
2145 of their execution history. */
2146 if ((::execution_direction != EXEC_REVERSE)
2147 && !record_is_replaying (minus_one_ptid))
2148 {
2149 this->beneath ()->resume (ptid, step, signal);
2150 return;
2151 }
2152
2153 /* Compute the btrace thread flag for the requested move. */
2154 if (::execution_direction == EXEC_REVERSE)
2155 {
2156 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2157 cflag = BTHR_RCONT;
2158 }
2159 else
2160 {
2161 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2162 cflag = BTHR_CONT;
2163 }
2164
2165 /* We just indicate the resume intent here. The actual stepping happens in
2166 record_btrace_wait below.
2167
2168 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2169
2170 process_stratum_target *proc_target = current_inferior ()->process_target ();
2171
2172 if (!target_is_non_stop_p ())
2173 {
2174 gdb_assert (inferior_ptid.matches (ptid));
2175
2176 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2177 {
2178 if (tp->ptid.matches (inferior_ptid))
2179 record_btrace_resume_thread (tp, flag);
2180 else
2181 record_btrace_resume_thread (tp, cflag);
2182 }
2183 }
2184 else
2185 {
2186 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2187 record_btrace_resume_thread (tp, flag);
2188 }
2189
2190 /* Async support. */
2191 if (target_can_async_p ())
2192 {
2193 target_async (true);
2194 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2195 }
2196 }
2197
2198 /* Cancel resuming TP. */
2199
2200 static void
2201 record_btrace_cancel_resume (struct thread_info *tp)
2202 {
2203 btrace_thread_flags flags;
2204
2205 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2206 if (flags == 0)
2207 return;
2208
2209 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2210 print_thread_id (tp),
2211 tp->ptid.to_string ().c_str (), flags.raw (),
2212 btrace_thread_flag_to_str (flags));
2213
2214 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2215 record_btrace_stop_replaying_at_end (tp);
2216 }
2217
2218 /* Return a target_waitstatus indicating that we ran out of history. */
2219
2220 static struct target_waitstatus
2221 btrace_step_no_history (void)
2222 {
2223 struct target_waitstatus status;
2224
2225 status.set_no_history ();
2226
2227 return status;
2228 }
2229
2230 /* Return a target_waitstatus indicating that a step finished. */
2231
2232 static struct target_waitstatus
2233 btrace_step_stopped (void)
2234 {
2235 struct target_waitstatus status;
2236
2237 status.set_stopped (GDB_SIGNAL_TRAP);
2238
2239 return status;
2240 }
2241
2242 /* Return a target_waitstatus indicating that a thread was stopped as
2243 requested. */
2244
2245 static struct target_waitstatus
2246 btrace_step_stopped_on_request (void)
2247 {
2248 struct target_waitstatus status;
2249
2250 status.set_stopped (GDB_SIGNAL_0);
2251
2252 return status;
2253 }
2254
2255 /* Return a target_waitstatus indicating a spurious stop. */
2256
2257 static struct target_waitstatus
2258 btrace_step_spurious (void)
2259 {
2260 struct target_waitstatus status;
2261
2262 status.set_spurious ();
2263
2264 return status;
2265 }
2266
2267 /* Return a target_waitstatus indicating that the thread was not resumed. */
2268
2269 static struct target_waitstatus
2270 btrace_step_no_resumed (void)
2271 {
2272 struct target_waitstatus status;
2273
2274 status.set_no_resumed ();
2275
2276 return status;
2277 }
2278
2279 /* Return a target_waitstatus indicating that we should wait again. */
2280
2281 static struct target_waitstatus
2282 btrace_step_again (void)
2283 {
2284 struct target_waitstatus status;
2285
2286 status.set_ignore ();
2287
2288 return status;
2289 }
2290
2291 /* Clear the record histories. */
2292
2293 static void
2294 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2295 {
2296 xfree (btinfo->insn_history);
2297 xfree (btinfo->call_history);
2298
2299 btinfo->insn_history = NULL;
2300 btinfo->call_history = NULL;
2301 }
2302
2303 /* Check whether TP's current replay position is at a breakpoint. */
2304
2305 static int
2306 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2307 {
2308 struct btrace_insn_iterator *replay;
2309 struct btrace_thread_info *btinfo;
2310 const struct btrace_insn *insn;
2311
2312 btinfo = &tp->btrace;
2313 replay = btinfo->replay;
2314
2315 if (replay == NULL)
2316 return 0;
2317
2318 insn = btrace_insn_get (replay);
2319 if (insn == NULL)
2320 return 0;
2321
2322 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2323 &btinfo->stop_reason);
2324 }
2325
2326 /* Step one instruction in forward direction. */
2327
2328 static struct target_waitstatus
2329 record_btrace_single_step_forward (struct thread_info *tp)
2330 {
2331 struct btrace_insn_iterator *replay, end, start;
2332 struct btrace_thread_info *btinfo;
2333
2334 btinfo = &tp->btrace;
2335 replay = btinfo->replay;
2336
2337 /* We're done if we're not replaying. */
2338 if (replay == NULL)
2339 return btrace_step_no_history ();
2340
2341 /* Check if we're stepping a breakpoint. */
2342 if (record_btrace_replay_at_breakpoint (tp))
2343 return btrace_step_stopped ();
2344
2345 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2346 jump back to the instruction at which we started. */
2347 start = *replay;
2348 do
2349 {
2350 unsigned int steps;
2351
2352 /* We will bail out here if we continue stepping after reaching the end
2353 of the execution history. */
2354 steps = btrace_insn_next (replay, 1);
2355 if (steps == 0)
2356 {
2357 *replay = start;
2358 return btrace_step_no_history ();
2359 }
2360 }
2361 while (btrace_insn_get (replay) == NULL);
2362
2363 /* Determine the end of the instruction trace. */
2364 btrace_insn_end (&end, btinfo);
2365
2366 /* The execution trace contains (and ends with) the current instruction.
2367 This instruction has not been executed, yet, so the trace really ends
2368 one instruction earlier. */
2369 if (btrace_insn_cmp (replay, &end) == 0)
2370 return btrace_step_no_history ();
2371
2372 return btrace_step_spurious ();
2373 }
2374
2375 /* Step one instruction in backward direction. */
2376
2377 static struct target_waitstatus
2378 record_btrace_single_step_backward (struct thread_info *tp)
2379 {
2380 struct btrace_insn_iterator *replay, start;
2381 struct btrace_thread_info *btinfo;
2382
2383 btinfo = &tp->btrace;
2384 replay = btinfo->replay;
2385
2386 /* Start replaying if we're not already doing so. */
2387 if (replay == NULL)
2388 replay = record_btrace_start_replaying (tp);
2389
2390 /* If we can't step any further, we reached the end of the history.
2391 Skip gaps during replay. If we end up at a gap (at the beginning of
2392 the trace), jump back to the instruction at which we started. */
2393 start = *replay;
2394 do
2395 {
2396 unsigned int steps;
2397
2398 steps = btrace_insn_prev (replay, 1);
2399 if (steps == 0)
2400 {
2401 *replay = start;
2402 return btrace_step_no_history ();
2403 }
2404 }
2405 while (btrace_insn_get (replay) == NULL);
2406
2407 /* Check if we're stepping a breakpoint.
2408
2409 For reverse-stepping, this check is after the step. There is logic in
2410 infrun.c that handles reverse-stepping separately. See, for example,
2411 proceed and adjust_pc_after_break.
2412
2413 This code assumes that for reverse-stepping, PC points to the last
2414 de-executed instruction, whereas for forward-stepping PC points to the
2415 next to-be-executed instruction. */
2416 if (record_btrace_replay_at_breakpoint (tp))
2417 return btrace_step_stopped ();
2418
2419 return btrace_step_spurious ();
2420 }
2421
2422 /* Step a single thread. */
2423
2424 static struct target_waitstatus
2425 record_btrace_step_thread (struct thread_info *tp)
2426 {
2427 struct btrace_thread_info *btinfo;
2428 struct target_waitstatus status;
2429 btrace_thread_flags flags;
2430
2431 btinfo = &tp->btrace;
2432
2433 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2434 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2435
2436 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2437 tp->ptid.to_string ().c_str (), flags.raw (),
2438 btrace_thread_flag_to_str (flags));
2439
2440 /* We can't step without an execution history. */
2441 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2442 return btrace_step_no_history ();
2443
2444 switch (flags)
2445 {
2446 default:
2447 internal_error (_("invalid stepping type."));
2448
2449 case BTHR_STOP:
2450 return btrace_step_stopped_on_request ();
2451
2452 case BTHR_STEP:
2453 status = record_btrace_single_step_forward (tp);
2454 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2455 break;
2456
2457 return btrace_step_stopped ();
2458
2459 case BTHR_RSTEP:
2460 status = record_btrace_single_step_backward (tp);
2461 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2462 break;
2463
2464 return btrace_step_stopped ();
2465
2466 case BTHR_CONT:
2467 status = record_btrace_single_step_forward (tp);
2468 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2469 break;
2470
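      /* A continue is implemented as a sequence of single steps: re-arm
	 the move flag and ask to be stepped again via btrace_step_again.  */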
2471 btinfo->flags |= flags;
2472 return btrace_step_again ();
2473
2474 case BTHR_RCONT:
2475 status = record_btrace_single_step_backward (tp);
2476 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
2477 break;
2478
2479 btinfo->flags |= flags;
2480 return btrace_step_again ();
2481 }
2482
2483 /* We keep threads moving at the end of their execution history. The wait
2484 method will stop the thread for which the event is reported. */
2485 if (status.kind () == TARGET_WAITKIND_NO_HISTORY)
2486 btinfo->flags |= flags;
2487
2488 return status;
2489 }
2490
2491 /* Announce further events if necessary. */
2492
2493 static void
2494 record_btrace_maybe_mark_async_event
2495 (const std::vector<thread_info *> &moving,
2496 const std::vector<thread_info *> &no_history)
2497 {
2498 bool more_moving = !moving.empty ();
2499 bool more_no_history = !no_history.empty ();
2500
2501 if (!more_moving && !more_no_history)
2502 return;
2503
2504 if (more_moving)
2505 DEBUG ("movers pending");
2506
2507 if (more_no_history)
2508 DEBUG ("no-history pending");
2509
2510 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2511 }
2512
2513 /* The wait method of target record-btrace. */
2514
2515 ptid_t
2516 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2517 target_wait_flags options)
2518 {
2519 std::vector<thread_info *> moving;
2520 std::vector<thread_info *> no_history;
2521
2522 /* Clear this, if needed we'll re-mark it below. */
2523 clear_async_event_handler (record_btrace_async_inferior_event_handler);
2524
2525 DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (),
2526 (unsigned) options);
2527
2528 /* As long as we're not replaying, just forward the request. */
2529 if ((::execution_direction != EXEC_REVERSE)
2530 && !record_is_replaying (minus_one_ptid))
2531 {
2532 return this->beneath ()->wait (ptid, status, options);
2533 }
2534
2535 /* Keep a work list of moving threads. */
2536 process_stratum_target *proc_target = current_inferior ()->process_target ();
2537 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2538 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2539 moving.push_back (tp);
2540
2541 if (moving.empty ())
2542 {
2543 *status = btrace_step_no_resumed ();
2544
2545 DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (),
2546 status->to_string ().c_str ());
2547
2548 return null_ptid;
2549 }
2550
2551 /* Step moving threads one by one, one step each, until either one thread
2552 reports an event or we run out of threads to step.
2553
2554 When stepping more than one thread, chances are that some threads reach
2555 the end of their execution history earlier than others. If we reported
2556 this immediately, all-stop on top of non-stop would stop all threads and
2557 resume the same threads next time. And we would report the same thread
2558 having reached the end of its execution history again.
2559
2560 In the worst case, this would starve the other threads. But even if other
2561 threads would be allowed to make progress, this would result in far too
2562 many intermediate stops.
2563
2564 We therefore delay the reporting of "no execution history" until we have
2565 nothing else to report. By this time, all threads should have moved to
2566 either the beginning or the end of their execution history. There will
2567 be a single user-visible stop. */
2568 struct thread_info *eventing = NULL;
2569 while ((eventing == NULL) && !moving.empty ())
2570 {
2571 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2572 {
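	  /* Note that IX is only advanced in the TARGET_WAITKIND_IGNORE
	     case below; in the other cases the element at IX is removed
	     from MOVING, so the next candidate slides into its place.  */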
2573 thread_info *tp = moving[ix];
2574
2575 *status = record_btrace_step_thread (tp);
2576
2577 switch (status->kind ())
2578 {
2579 case TARGET_WAITKIND_IGNORE:
2580 ix++;
2581 break;
2582
2583 case TARGET_WAITKIND_NO_HISTORY:
2584 no_history.push_back (ordered_remove (moving, ix));
2585 break;
2586
2587 default:
2588 eventing = unordered_remove (moving, ix);
2589 break;
2590 }
2591 }
2592 }
2593
2594 if (eventing == NULL)
2595 {
2596 /* We started with at least one moving thread. This thread must have
2597 either stopped or reached the end of its execution history.
2598
2599 In the former case, EVENTING must not be NULL.
2600 In the latter case, NO_HISTORY must not be empty. */
2601 gdb_assert (!no_history.empty ());
2602
2603 /* We kept threads moving at the end of their execution history. Stop
2604 EVENTING now that we are going to report its stop. */
2605 eventing = unordered_remove (no_history, 0);
2606 eventing->btrace.flags &= ~BTHR_MOVE;
2607
2608 *status = btrace_step_no_history ();
2609 }
2610
2611 gdb_assert (eventing != NULL);
2612
2613 /* We kept threads replaying at the end of their execution history. Stop
2614 replaying EVENTING now that we are going to report its stop. */
2615 record_btrace_stop_replaying_at_end (eventing);
2616
2617 /* Stop all other threads. */
2618 if (!target_is_non_stop_p ())
2619 {
2620 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2621 record_btrace_cancel_resume (tp);
2622 }
2623
2624 /* In async mode, we need to announce further events. */
2625 if (target_is_async_p ())
2626 record_btrace_maybe_mark_async_event (moving, no_history);
2627
2628 /* Start record histories anew from the current position. */
2629 record_btrace_clear_histories (&eventing->btrace);
2630
2631 /* We moved the replay position but did not update registers. */
2632 registers_changed_thread (eventing);
2633
2634 DEBUG ("wait ended by thread %s (%s): %s",
2635 print_thread_id (eventing),
2636 eventing->ptid.to_string ().c_str (),
2637 status->to_string ().c_str ());
2638
2639 return eventing->ptid;
2640 }
2641
2642 /* The stop method of target record-btrace. */
2643
2644 void
2645 record_btrace_target::stop (ptid_t ptid)
2646 {
2647 DEBUG ("stop %s", ptid.to_string ().c_str ());
2648
2649 /* As long as we're not replaying, just forward the request. */
2650 if ((::execution_direction != EXEC_REVERSE)
2651 && !record_is_replaying (minus_one_ptid))
2652 {
2653 this->beneath ()->stop (ptid);
2654 }
2655 else
2656 {
2657 process_stratum_target *proc_target
2658 = current_inferior ()->process_target ();
2659
2660 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2661 {
2662 tp->btrace.flags &= ~BTHR_MOVE;
2663 tp->btrace.flags |= BTHR_STOP;
2664 }
2665 }
2666 }
2667
2668 /* The can_execute_reverse method of target record-btrace. */
2669
2670 bool
2671 record_btrace_target::can_execute_reverse ()
2672 {
2673 return true;
2674 }
2675
2676 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2677
2678 bool
2679 record_btrace_target::stopped_by_sw_breakpoint ()
2680 {
2681 if (record_is_replaying (minus_one_ptid))
2682 {
2683 struct thread_info *tp = inferior_thread ();
2684
2685 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2686 }
2687
2688 return this->beneath ()->stopped_by_sw_breakpoint ();
2689 }
2690
2691 /* The supports_stopped_by_sw_breakpoint method of target
2692 record-btrace. */
2693
2694 bool
2695 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2696 {
2697 if (record_is_replaying (minus_one_ptid))
2698 return true;
2699
2700 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2701 }
2702
2703 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2704
2705 bool
2706 record_btrace_target::stopped_by_hw_breakpoint ()
2707 {
2708 if (record_is_replaying (minus_one_ptid))
2709 {
2710 struct thread_info *tp = inferior_thread ();
2711
2712 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2713 }
2714
2715 return this->beneath ()->stopped_by_hw_breakpoint ();
2716 }
2717
2718 /* The supports_stopped_by_hw_breakpoint method of target
2719 record-btrace. */
2720
2721 bool
2722 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2723 {
2724 if (record_is_replaying (minus_one_ptid))
2725 return true;
2726
2727 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2728 }
2729
2730 /* The update_thread_list method of target record-btrace. */
2731
2732 void
2733 record_btrace_target::update_thread_list ()
2734 {
2735 /* We don't add or remove threads during replay. */
2736 if (record_is_replaying (minus_one_ptid))
2737 return;
2738
2739 /* Forward the request. */
2740 this->beneath ()->update_thread_list ();
2741 }
2742
2743 /* The thread_alive method of target record-btrace. */
2744
2745 bool
2746 record_btrace_target::thread_alive (ptid_t ptid)
2747 {
2748 /* We don't add or remove threads during replay. */
2749 if (record_is_replaying (minus_one_ptid))
2750 return true;
2751
2752 /* Forward the request. */
2753 return this->beneath ()->thread_alive (ptid);
2754 }
2755
2756 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2757 is stopped. */
2758
2759 static void
2760 record_btrace_set_replay (struct thread_info *tp,
2761 const struct btrace_insn_iterator *it)
2762 {
2763 struct btrace_thread_info *btinfo;
2764
2765 btinfo = &tp->btrace;
2766
2767 if (it == NULL)
2768 record_btrace_stop_replaying (tp);
2769 else
2770 {
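      /* Start replaying if we aren't already; if the replay iterator
	 already points at IT, there is nothing to do.  Otherwise move
	 the replay position to IT.  */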
2771 if (btinfo->replay == NULL)
2772 record_btrace_start_replaying (tp);
2773 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2774 return;
2775
2776 *btinfo->replay = *it;
2777 registers_changed_thread (tp);
2778 }
2779
2780 /* Start anew from the new replay position. */
2781 record_btrace_clear_histories (btinfo);
2782
2783 inferior_thread ()->set_stop_pc (regcache_read_pc (get_current_regcache ()));
2784 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2785 }
2786
2787 /* The goto_record_begin method of target record-btrace. */
2788
2789 void
2790 record_btrace_target::goto_record_begin ()
2791 {
2792 struct thread_info *tp;
2793 struct btrace_insn_iterator begin;
2794
2795 tp = require_btrace_thread ();
2796
2797 btrace_insn_begin (&begin, &tp->btrace);
2798
2799 /* Skip gaps at the beginning of the trace. */
2800 while (btrace_insn_get (&begin) == NULL)
2801 {
2802 unsigned int steps;
2803
2804 steps = btrace_insn_next (&begin, 1);
2805 if (steps == 0)
2806 error (_("No trace."));
2807 }
2808
2809 record_btrace_set_replay (tp, &begin);
2810 }
2811
2812 /* The goto_record_end method of target record-btrace. */
2813
2814 void
2815 record_btrace_target::goto_record_end ()
2816 {
2817 struct thread_info *tp;
2818
2819 tp = require_btrace_thread ();
2820
2821 record_btrace_set_replay (tp, NULL);
2822 }
2823
2824 /* The goto_record method of target record-btrace. */
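/* For example, "record goto 3" (where 3 is an instruction number as shown
   by "record instruction-history") moves the replay position to that
   instruction; selecting a gap is rejected below.  */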
2825
2826 void
2827 record_btrace_target::goto_record (ULONGEST insn)
2828 {
2829 struct thread_info *tp;
2830 struct btrace_insn_iterator it;
2831 unsigned int number;
2832 int found;
2833
2834 number = insn;
2835
2836 /* Check for wrap-arounds. */
2837 if (number != insn)
2838 error (_("Instruction number out of range."));
2839
2840 tp = require_btrace_thread ();
2841
2842 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2843
2844 /* Check if the instruction could not be found or is a gap. */
2845 if (found == 0 || btrace_insn_get (&it) == NULL)
2846 error (_("No such instruction."));
2847
2848 record_btrace_set_replay (tp, &it);
2849 }
2850
2851 /* The record_stop_replaying method of target record-btrace. */
2852
2853 void
2854 record_btrace_target::record_stop_replaying ()
2855 {
2856 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2857 record_btrace_stop_replaying (tp);
2858 }
2859
2860 /* The execution_direction target method. */
2861
2862 enum exec_direction_kind
2863 record_btrace_target::execution_direction ()
2864 {
2865 return record_btrace_resume_exec_dir;
2866 }
2867
2868 /* The prepare_to_generate_core target method. */
2869
2870 void
2871 record_btrace_target::prepare_to_generate_core ()
2872 {
2873 record_btrace_generating_corefile = 1;
2874 }
2875
2876 /* The done_generating_core target method. */
2877
2878 void
2879 record_btrace_target::done_generating_core ()
2880 {
2881 record_btrace_generating_corefile = 0;
2882 }
2883
2884 /* Start recording in BTS format. */
2885
2886 static void
2887 cmd_record_btrace_bts_start (const char *args, int from_tty)
2888 {
2889 if (args != NULL && *args != 0)
2890 error (_("Invalid argument."));
2891
2892 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2893
2894 try
2895 {
2896 execute_command ("target record-btrace", from_tty);
2897 }
2898 catch (const gdb_exception &exception)
2899 {
2900 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2901 throw;
2902 }
2903 }
2904
2905 /* Start recording in Intel Processor Trace format. */
2906
2907 static void
2908 cmd_record_btrace_pt_start (const char *args, int from_tty)
2909 {
2910 if (args != NULL && *args != 0)
2911 error (_("Invalid argument."));
2912
2913 record_btrace_conf.format = BTRACE_FORMAT_PT;
2914
2915 try
2916 {
2917 execute_command ("target record-btrace", from_tty);
2918 }
2919 catch (const gdb_exception &exception)
2920 {
2921 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2922 throw;
2923 }
2924 }
2925
2926 /* Start recording; try Intel Processor Trace format and fall back to BTS. */
2927
2928 static void
2929 cmd_record_btrace_start (const char *args, int from_tty)
2930 {
2931 if (args != NULL && *args != 0)
2932 error (_("Invalid argument."));
2933
2934 record_btrace_conf.format = BTRACE_FORMAT_PT;
2935
2936 try
2937 {
2938 execute_command ("target record-btrace", from_tty);
2939 }
2940 catch (const gdb_exception_error &exception)
2941 {
2942 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2943
2944 try
2945 {
2946 execute_command ("target record-btrace", from_tty);
2947 }
2948 catch (const gdb_exception &ex)
2949 {
2950 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2951 throw;
2952 }
2953 }
2954 }
2955
2956 /* The "show record btrace replay-memory-access" command. */
2957
2958 static void
2959 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2960 struct cmd_list_element *c, const char *value)
2961 {
2962 gdb_printf (file, _("Replay memory access is %s.\n"),
2963 replay_memory_access);
2964 }
2965
2966 /* The "set record btrace cpu none" command. */
2967
2968 static void
2969 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2970 {
2971 if (args != nullptr && *args != 0)
2972 error (_("Trailing junk: '%s'."), args);
2973
2974 record_btrace_cpu_state = CS_NONE;
2975 }
2976
2977 /* The "set record btrace cpu auto" command. */
2978
2979 static void
2980 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2981 {
2982 if (args != nullptr && *args != 0)
2983 error (_("Trailing junk: '%s'."), args);
2984
2985 record_btrace_cpu_state = CS_AUTO;
2986 }
2987
2988 /* The "set record btrace cpu" command. */
2989
2990 static void
2991 cmd_set_record_btrace_cpu (const char *args, int from_tty)
2992 {
2993 if (args == nullptr)
2994 args = "";
2995
2996 /* We use a hard-coded vendor string for now. */
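  /* The accepted form is "intel: FAMILY/MODEL[/STEPPING]", for example
     "intel: 6/158/9" (illustrative values).  The %n conversions record
     how much of ARGS was consumed so trailing junk can be rejected.  */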
2997 unsigned int family, model, stepping;
2998 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
2999 &model, &l1, &stepping, &l2);
3000 if (matches == 3)
3001 {
3002 if (strlen (args) != l2)
3003 error (_("Trailing junk: '%s'."), args + l2);
3004 }
3005 else if (matches == 2)
3006 {
3007 if (strlen (args) != l1)
3008 error (_("Trailing junk: '%s'."), args + l1);
3009
3010 stepping = 0;
3011 }
3012 else
3013 error (_("Bad format. See \"help set record btrace cpu\"."));
3014
3015 if (USHRT_MAX < family)
3016 error (_("Cpu family too big."));
3017
3018 if (UCHAR_MAX < model)
3019 error (_("Cpu model too big."));
3020
3021 if (UCHAR_MAX < stepping)
3022 error (_("Cpu stepping too big."));
3023
3024 record_btrace_cpu.vendor = CV_INTEL;
3025 record_btrace_cpu.family = family;
3026 record_btrace_cpu.model = model;
3027 record_btrace_cpu.stepping = stepping;
3028
3029 record_btrace_cpu_state = CS_CPU;
3030 }
3031
3032 /* The "show record btrace cpu" command. */
3033
3034 static void
3035 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3036 {
3037 if (args != nullptr && *args != 0)
3038 error (_("Trailing junk: '%s'."), args);
3039
3040 switch (record_btrace_cpu_state)
3041 {
3042 case CS_AUTO:
3043 gdb_printf (_("btrace cpu is 'auto'.\n"));
3044 return;
3045
3046 case CS_NONE:
3047 gdb_printf (_("btrace cpu is 'none'.\n"));
3048 return;
3049
3050 case CS_CPU:
3051 switch (record_btrace_cpu.vendor)
3052 {
3053 case CV_INTEL:
3054 if (record_btrace_cpu.stepping == 0)
3055 gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
3056 record_btrace_cpu.family,
3057 record_btrace_cpu.model);
3058 else
3059 gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3060 record_btrace_cpu.family,
3061 record_btrace_cpu.model,
3062 record_btrace_cpu.stepping);
3063 return;
3064 }
3065 }
3066
3067 error (_("Internal error: bad cpu state."));
3068 }
3069
3070 /* The "record bts buffer-size" show value function. */
3071
3072 static void
3073 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3074 struct cmd_list_element *c,
3075 const char *value)
3076 {
3077 gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
3078 value);
3079 }
3080
3081 /* The "record pt buffer-size" show value function. */
3082
3083 static void
3084 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3085 struct cmd_list_element *c,
3086 const char *value)
3087 {
3088 gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
3089 value);
3090 }
3091
3092 /* Initialize btrace commands. */
3093
3094 void _initialize_record_btrace ();
3095 void
3096 _initialize_record_btrace ()
3097 {
3098 cmd_list_element *record_btrace_cmd
3099 = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3100 _("Start branch trace recording."),
3101 &record_btrace_cmdlist, 0, &record_cmdlist);
3102 add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);
3103
3104 cmd_list_element *record_btrace_bts_cmd
3105 = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3106 _("\
3107 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3108 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3109 This format may not be available on all processors."),
3110 &record_btrace_cmdlist);
3111 add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
3112 &record_cmdlist);
3113
3114 cmd_list_element *record_btrace_pt_cmd
3115 = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3116 _("\
3117 Start branch trace recording in Intel Processor Trace format.\n\n\
3118 This format may not be available on all processors."),
3119 &record_btrace_cmdlist);
3120 add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);
3121
3122 add_setshow_prefix_cmd ("btrace", class_support,
3123 _("Set record options."),
3124 _("Show record options."),
3125 &set_record_btrace_cmdlist,
3126 &show_record_btrace_cmdlist,
3127 &set_record_cmdlist, &show_record_cmdlist);
3128
3129 add_setshow_enum_cmd ("replay-memory-access", no_class,
3130 replay_memory_access_types, &replay_memory_access, _("\
3131 Set what memory accesses are allowed during replay."), _("\
3132 Show what memory accesses are allowed during replay."),
3133 _("Default is READ-ONLY.\n\n\
3134 The btrace record target does not trace data.\n\
3135 The memory therefore corresponds to the live target and not \
3136 to the current replay position.\n\n\
3137 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3138 When READ-WRITE, allow accesses to read-only and read-write memory during \
3139 replay."),
3140 NULL, cmd_show_replay_memory_access,
3141 &set_record_btrace_cmdlist,
3142 &show_record_btrace_cmdlist);
3143
3144 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3145 _("\
3146 Set the cpu to be used for trace decode.\n\n\
3147 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3148 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3149 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3150 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3151 When GDB does not support that cpu, this option can be used to enable\n\
3152 workarounds for a similar cpu that GDB supports.\n\n\
3153 When set to \"none\", errata workarounds are disabled."),
3154 &set_record_btrace_cpu_cmdlist,
3155 1,
3156 &set_record_btrace_cmdlist);
3157
3158 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3159 Automatically determine the cpu to be used for trace decode."),
3160 &set_record_btrace_cpu_cmdlist);
3161
3162 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3163 Do not enable errata workarounds for trace decode."),
3164 &set_record_btrace_cpu_cmdlist);
3165
3166 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3167 Show the cpu to be used for trace decode."),
3168 &show_record_btrace_cmdlist);
3169
3170 add_setshow_prefix_cmd ("bts", class_support,
3171 _("Set record btrace bts options."),
3172 _("Show record btrace bts options."),
3173 &set_record_btrace_bts_cmdlist,
3174 &show_record_btrace_bts_cmdlist,
3175 &set_record_btrace_cmdlist,
3176 &show_record_btrace_cmdlist);
3177
3178 add_setshow_uinteger_cmd ("buffer-size", no_class,
3179 &record_btrace_conf.bts.size,
3180 _("Set the record/replay bts buffer size."),
3181 _("Show the record/replay bts buffer size."), _("\
3182 When starting recording, request a trace buffer of this size. \
3183 The actual buffer size may differ from the requested size. \
3184 Use \"info record\" to see the actual buffer size.\n\n\
3185 Bigger buffers allow longer recording but also take more time to process \
3186 the recorded execution trace.\n\n\
3187 The trace buffer size may not be changed while recording."), NULL,
3188 show_record_bts_buffer_size_value,
3189 &set_record_btrace_bts_cmdlist,
3190 &show_record_btrace_bts_cmdlist);
3191
3192 add_setshow_prefix_cmd ("pt", class_support,
3193 _("Set record btrace pt options."),
3194 _("Show record btrace pt options."),
3195 &set_record_btrace_pt_cmdlist,
3196 &show_record_btrace_pt_cmdlist,
3197 &set_record_btrace_cmdlist,
3198 &show_record_btrace_cmdlist);
3199
3200 add_setshow_uinteger_cmd ("buffer-size", no_class,
3201 &record_btrace_conf.pt.size,
3202 _("Set the record/replay pt buffer size."),
3203 _("Show the record/replay pt buffer size."), _("\
3204 Bigger buffers allow longer recording but also take more time to process \
3205 the recorded execution.\n\
3206 The actual buffer size may differ from the requested size. Use \"info record\" \
3207 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3208 &set_record_btrace_pt_cmdlist,
3209 &show_record_btrace_pt_cmdlist);
3210
3211 add_target (record_btrace_target_info, record_btrace_target_open);
3212
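  /* Allocate the branch trace frame cache used by the btrace frame
     unwinders above; entries map frames to btrace function call
     segments.  */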
3213 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3214 xcalloc, xfree);
3215
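  /* Default trace buffer sizes: 64 KiB for BTS and 16 KiB for PT.  The
     size actually obtained may differ; "info record" shows it.  */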
3216 record_btrace_conf.bts.size = 64 * 1024;
3217 record_btrace_conf.pt.size = 16 * 1024;
3218 }