gdb/record-btrace.c (thirdparty/binutils-gdb.git)
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observable.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "gdbsupport/vec.h"
42 #include "inferior.h"
43 #include <algorithm>
44 #include "gdbarch.h"
45
46 static const target_info record_btrace_target_info = {
47 "record-btrace",
48 N_("Branch tracing target"),
49 N_("Collect control-flow trace and provide the execution history.")
50 };
51
52 /* The target_ops of record-btrace. */
53
54 class record_btrace_target final : public target_ops
55 {
56 public:
57 const target_info &info () const override
58 { return record_btrace_target_info; }
59
60 strata stratum () const override { return record_stratum; }
61
62 void close () override;
63 void async (int) override;
64
65 void detach (inferior *inf, int from_tty) override
66 { record_detach (this, inf, from_tty); }
67
68 void disconnect (const char *, int) override;
69
70 void mourn_inferior () override
71 { record_mourn_inferior (this); }
72
73 void kill () override
74 { record_kill (this); }
75
76 enum record_method record_method (ptid_t ptid) override;
77
78 void stop_recording () override;
79 void info_record () override;
80
81 void insn_history (int size, gdb_disassembly_flags flags) override;
82 void insn_history_from (ULONGEST from, int size,
83 gdb_disassembly_flags flags) override;
84 void insn_history_range (ULONGEST begin, ULONGEST end,
85 gdb_disassembly_flags flags) override;
86 void call_history (int size, record_print_flags flags) override;
87 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
88 override;
89 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
90 override;
91
92 bool record_is_replaying (ptid_t ptid) override;
93 bool record_will_replay (ptid_t ptid, int dir) override;
94 void record_stop_replaying () override;
95
96 enum target_xfer_status xfer_partial (enum target_object object,
97 const char *annex,
98 gdb_byte *readbuf,
99 const gdb_byte *writebuf,
100 ULONGEST offset, ULONGEST len,
101 ULONGEST *xfered_len) override;
102
103 int insert_breakpoint (struct gdbarch *,
104 struct bp_target_info *) override;
105 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
106 enum remove_bp_reason) override;
107
108 void fetch_registers (struct regcache *, int) override;
109
110 void store_registers (struct regcache *, int) override;
111 void prepare_to_store (struct regcache *) override;
112
113 const struct frame_unwind *get_unwinder () override;
114
115 const struct frame_unwind *get_tailcall_unwinder () override;
116
117 void commit_resume () override;
118 void resume (ptid_t, int, enum gdb_signal) override;
119 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
120
121 void stop (ptid_t) override;
122 void update_thread_list () override;
123 bool thread_alive (ptid_t ptid) override;
124 void goto_record_begin () override;
125 void goto_record_end () override;
126 void goto_record (ULONGEST insn) override;
127
128 bool can_execute_reverse () override;
129
130 bool stopped_by_sw_breakpoint () override;
131 bool supports_stopped_by_sw_breakpoint () override;
132
133 bool stopped_by_hw_breakpoint () override;
134 bool supports_stopped_by_hw_breakpoint () override;
135
136 enum exec_direction_kind execution_direction () override;
137 void prepare_to_generate_core () override;
138 void done_generating_core () override;
139 };
140
141 static record_btrace_target record_btrace_ops;
142
145 /* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
147 static const gdb::observers::token record_btrace_thread_observer_token {};
148
149 /* Memory access types used in set/show record btrace replay-memory-access. */
150 static const char replay_memory_access_read_only[] = "read-only";
151 static const char replay_memory_access_read_write[] = "read-write";
152 static const char *const replay_memory_access_types[] =
153 {
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157 };
158
159 /* The currently allowed replay memory access type. */
160 static const char *replay_memory_access = replay_memory_access_read_only;
161
162 /* The cpu state kinds. */
163 enum record_btrace_cpu_state_kind
164 {
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168 };
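
/* The mapping is presumably driven by "set record btrace cpu" (see the
   command list below): CS_AUTO decodes for the cpu the trace was
   recorded on, CS_NONE requests decoding without cpu-specific errata
   workarounds (the vendor becomes CV_UNKNOWN), and CS_CPU decodes for
   an explicitly configured cpu.  */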
169
170 /* The current cpu state. */
171 static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173 /* The current cpu for trace decode. */
174 static struct btrace_cpu record_btrace_cpu;
175
176 /* Command lists for "set/show record btrace". */
177 static struct cmd_list_element *set_record_btrace_cmdlist;
178 static struct cmd_list_element *show_record_btrace_cmdlist;
179
180 /* The execution direction of the last resume we got. See record-full.c. */
181 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183 /* The async event handler for reverse/replay execution. */
184 static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
186 /* A flag indicating that we are currently generating a core file. */
187 static int record_btrace_generating_corefile;
188
189 /* The current branch trace configuration. */
190 static struct btrace_config record_btrace_conf;
191
192 /* Command list for "record btrace". */
193 static struct cmd_list_element *record_btrace_cmdlist;
194
195 /* Command lists for "set/show record btrace bts". */
196 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
199 /* Command lists for "set/show record btrace pt". */
200 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
203 /* Command list for "set record btrace cpu". */
204 static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
206 /* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209 #define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
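
/* Example (hypothetical values): with "set debug record 1" in effect,
   DEBUG ("resume %s", "step") prints "[record-btrace] resume step"
   followed by a newline to gdb_stdlog.  */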
217
218
219 /* Return the cpu configured by the user, or NULL if the cpu was
220 configured as auto. */
221 const struct btrace_cpu *
222 record_btrace_get_cpu (void)
223 {
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237 }
238
239 /* Update the branch trace for the current thread and return a pointer to its
240 thread_info.
241
242 Throws an error if there is no thread or no trace. This function never
243 returns NULL. */
244
245 static struct thread_info *
246 require_btrace_thread (void)
247 {
248 DEBUG ("require");
249
250 if (inferior_ptid == null_ptid)
251 error (_("No thread."));
252
253 thread_info *tp = inferior_thread ();
254
255 validate_registers_access ();
256
257 btrace_fetch (tp, record_btrace_get_cpu ());
258
259 if (btrace_is_empty (tp))
260 error (_("No trace."));
261
262 return tp;
263 }
264
265 /* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
267
268 Throws an error if there is no thread or no trace. This function never
269 returns NULL. */
270
271 static struct btrace_thread_info *
272 require_btrace (void)
273 {
274 struct thread_info *tp;
275
276 tp = require_btrace_thread ();
277
278 return &tp->btrace;
279 }
280
281 /* Enable branch tracing for one thread. Warn on errors. */
282
283 static void
284 record_btrace_enable_warn (struct thread_info *tp)
285 {
286 try
287 {
288 btrace_enable (tp, &record_btrace_conf);
289 }
290 catch (const gdb_exception_error &error)
291 {
292 warning ("%s", error.what ());
293 }
294 }
295
296 /* Enable automatic tracing of new threads. */
297
298 static void
299 record_btrace_auto_enable (void)
300 {
301 DEBUG ("attach thread observer");
302
303 gdb::observers::new_thread.attach (record_btrace_enable_warn,
304 record_btrace_thread_observer_token);
305 }
306
307 /* Disable automatic tracing of new threads. */
308
309 static void
310 record_btrace_auto_disable (void)
311 {
312 DEBUG ("detach thread observer");
313
314 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
315 }
316
317 /* The record-btrace async event handler function. */
318
319 static void
320 record_btrace_handle_async_inferior_event (gdb_client_data data)
321 {
322 inferior_event_handler (INF_REG_EVENT, NULL);
323 }
324
325 /* See record-btrace.h. */
326
327 void
328 record_btrace_push_target (void)
329 {
330 const char *format;
331
332 record_btrace_auto_enable ();
333
334 push_target (&record_btrace_ops);
335
336 record_btrace_async_inferior_event_handler
337 = create_async_event_handler (record_btrace_handle_async_inferior_event,
338 NULL);
339 record_btrace_generating_corefile = 0;
340
341 format = btrace_format_short_string (record_btrace_conf.format);
342 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
343 }
344
345 /* Disable btrace on a set of threads on scope exit. */
346
347 struct scoped_btrace_disable
348 {
349 scoped_btrace_disable () = default;
350
351 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
352
353 ~scoped_btrace_disable ()
354 {
355 for (thread_info *tp : m_threads)
356 btrace_disable (tp);
357 }
358
359 void add_thread (thread_info *thread)
360 {
361 m_threads.push_front (thread);
362 }
363
364 void discard ()
365 {
366 m_threads.clear ();
367 }
368
369 private:
370 std::forward_list<thread_info *> m_threads;
371 };
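
/* A minimal usage sketch, mirroring record_btrace_target_open below:

     {
       scoped_btrace_disable disable;

       for (thread_info *tp : all_non_exited_threads ())
         {
           btrace_enable (tp, &record_btrace_conf);   // may throw
           disable.add_thread (tp);
         }

       disable.discard ();   // success; keep btrace enabled
     }

   If btrace_enable throws part-way through, the destructor disables
   btrace for the threads added so far.  */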
372
373 /* Open target record-btrace. */
374
375 static void
376 record_btrace_target_open (const char *args, int from_tty)
377 {
378 /* If we fail to enable btrace for one thread, disable it for the threads for
379 which it was successfully enabled. */
380 scoped_btrace_disable btrace_disable;
381
382 DEBUG ("open");
383
384 record_preopen ();
385
386 if (!target_has_execution)
387 error (_("The program is not being run."));
388
389 for (thread_info *tp : all_non_exited_threads ())
390 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
391 {
392 btrace_enable (tp, &record_btrace_conf);
393
394 btrace_disable.add_thread (tp);
395 }
396
397 record_btrace_push_target ();
398
399 btrace_disable.discard ();
400 }
401
402 /* The stop_recording method of target record-btrace. */
403
404 void
405 record_btrace_target::stop_recording ()
406 {
407 DEBUG ("stop recording");
408
409 record_btrace_auto_disable ();
410
411 for (thread_info *tp : all_non_exited_threads ())
412 if (tp->btrace.target != NULL)
413 btrace_disable (tp);
414 }
415
416 /* The disconnect method of target record-btrace. */
417
418 void
419 record_btrace_target::disconnect (const char *args,
420 int from_tty)
421 {
422 struct target_ops *beneath = this->beneath ();
423
424 /* Do not stop recording, just clean up GDB side. */
425 unpush_target (this);
426
427 /* Forward disconnect. */
428 beneath->disconnect (args, from_tty);
429 }
430
431 /* The close method of target record-btrace. */
432
433 void
434 record_btrace_target::close ()
435 {
436 if (record_btrace_async_inferior_event_handler != NULL)
437 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
438
439 /* Make sure automatic recording gets disabled even if we did not stop
440 recording before closing the record-btrace target. */
441 record_btrace_auto_disable ();
442
443 /* We should have already stopped recording.
444 Tear down btrace in case we have not. */
445 for (thread_info *tp : all_non_exited_threads ())
446 btrace_teardown (tp);
447 }
448
449 /* The async method of target record-btrace. */
450
451 void
452 record_btrace_target::async (int enable)
453 {
454 if (enable)
455 mark_async_event_handler (record_btrace_async_inferior_event_handler);
456 else
457 clear_async_event_handler (record_btrace_async_inferior_event_handler);
458
459 this->beneath ()->async (enable);
460 }
461
462 /* Adjusts *SIZE and returns a human-readable size suffix. */
463
464 static const char *
465 record_btrace_adjust_size (unsigned int *size)
466 {
467 unsigned int sz;
468
469 sz = *size;
470
471 if ((sz & ((1u << 30) - 1)) == 0)
472 {
473 *size = sz >> 30;
474 return "GB";
475 }
476 else if ((sz & ((1u << 20) - 1)) == 0)
477 {
478 *size = sz >> 20;
479 return "MB";
480 }
481 else if ((sz & ((1u << 10) - 1)) == 0)
482 {
483 *size = sz >> 10;
484 return "kB";
485 }
486 else
487 return "";
488 }
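
/* For example, a *SIZE of 2097152 (2 << 20) becomes 2 with suffix "MB",
   while a size that is not a whole multiple of 1 kB is left unchanged
   and gets the empty suffix.  */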
489
490 /* Print a BTS configuration. */
491
492 static void
493 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
494 {
495 const char *suffix;
496 unsigned int size;
497
498 size = conf->size;
499 if (size > 0)
500 {
501 suffix = record_btrace_adjust_size (&size);
502 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
503 }
504 }
505
506 /* Print an Intel Processor Trace configuration. */
507
508 static void
509 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
510 {
511 const char *suffix;
512 unsigned int size;
513
514 size = conf->size;
515 if (size > 0)
516 {
517 suffix = record_btrace_adjust_size (&size);
518 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
519 }
520 }
521
522 /* Print a branch tracing configuration. */
523
524 static void
525 record_btrace_print_conf (const struct btrace_config *conf)
526 {
527 printf_unfiltered (_("Recording format: %s.\n"),
528 btrace_format_string (conf->format));
529
530 switch (conf->format)
531 {
532 case BTRACE_FORMAT_NONE:
533 return;
534
535 case BTRACE_FORMAT_BTS:
536 record_btrace_print_bts_conf (&conf->bts);
537 return;
538
539 case BTRACE_FORMAT_PT:
540 record_btrace_print_pt_conf (&conf->pt);
541 return;
542 }
543
544 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
545 }
546
547 /* The info_record method of target record-btrace. */
548
549 void
550 record_btrace_target::info_record ()
551 {
552 struct btrace_thread_info *btinfo;
553 const struct btrace_config *conf;
554 struct thread_info *tp;
555 unsigned int insns, calls, gaps;
556
557 DEBUG ("info");
558
559 tp = find_thread_ptid (inferior_ptid);
560 if (tp == NULL)
561 error (_("No thread."));
562
563 validate_registers_access ();
564
565 btinfo = &tp->btrace;
566
567 conf = ::btrace_conf (btinfo);
568 if (conf != NULL)
569 record_btrace_print_conf (conf);
570
571 btrace_fetch (tp, record_btrace_get_cpu ());
572
573 insns = 0;
574 calls = 0;
575 gaps = 0;
576
577 if (!btrace_is_empty (tp))
578 {
579 struct btrace_call_iterator call;
580 struct btrace_insn_iterator insn;
581
582 btrace_call_end (&call, btinfo);
583 btrace_call_prev (&call, 1);
584 calls = btrace_call_number (&call);
585
586 btrace_insn_end (&insn, btinfo);
587 insns = btrace_insn_number (&insn);
588
589 /* If the last instruction is not a gap, it is the current instruction
590 that is not actually part of the record. */
591 if (btrace_insn_get (&insn) != NULL)
592 insns -= 1;
593
594 gaps = btinfo->ngaps;
595 }
596
597 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
598 "for thread %s (%s).\n"), insns, calls, gaps,
599 print_thread_id (tp),
600 target_pid_to_str (tp->ptid).c_str ());
601
602 if (btrace_is_replaying (tp))
603 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
604 btrace_insn_number (btinfo->replay));
605 }
606
607 /* Print a decode error. */
608
609 static void
610 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
611 enum btrace_format format)
612 {
613 const char *errstr = btrace_decode_error (format, errcode);
614
615 uiout->text (_("["));
616 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
617 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
618 {
619 uiout->text (_("decode error ("));
620 uiout->field_signed ("errcode", errcode);
621 uiout->text (_("): "));
622 }
623 uiout->text (errstr);
624 uiout->text (_("]\n"));
625 }
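
/* For example (sketch): a BTS decode error of -1 would print
   "[decode error (-1): <error string>]", while a positive errcode in
   the PT format prints just "[<string>]", since it denotes a
   notification rather than an error.  */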
626
627 /* A range of source lines. */
628
629 struct btrace_line_range
630 {
631 /* The symtab this line is from. */
632 struct symtab *symtab;
633
634 /* The first line (inclusive). */
635 int begin;
636
637 /* The last line (exclusive). */
638 int end;
639 };
640
641 /* Construct a line range. */
642
643 static struct btrace_line_range
644 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
645 {
646 struct btrace_line_range range;
647
648 range.symtab = symtab;
649 range.begin = begin;
650 range.end = end;
651
652 return range;
653 }
654
655 /* Add a line to a line range. */
656
657 static struct btrace_line_range
658 btrace_line_range_add (struct btrace_line_range range, int line)
659 {
660 if (range.end <= range.begin)
661 {
662 /* This is the first entry. */
663 range.begin = line;
664 range.end = line + 1;
665 }
666 else if (line < range.begin)
667 range.begin = line;
668 else if (range.end <= line)
669 range.end = line + 1;
670
671 return range;
672 }
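
/* For example, adding line 7 to an empty range yields [7, 8); adding
   line 4 to that extends it to [4, 8).  */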
673
674 /* Return non-zero if RANGE is empty, zero otherwise. */
675
676 static int
677 btrace_line_range_is_empty (struct btrace_line_range range)
678 {
679 return range.end <= range.begin;
680 }
681
682 /* Return non-zero if LHS contains RHS, zero otherwise. */
683
684 static int
685 btrace_line_range_contains_range (struct btrace_line_range lhs,
686 struct btrace_line_range rhs)
687 {
688 return ((lhs.symtab == rhs.symtab)
689 && (lhs.begin <= rhs.begin)
690 && (rhs.end <= lhs.end));
691 }
692
693 /* Find the line range associated with PC. */
694
695 static struct btrace_line_range
696 btrace_find_line_range (CORE_ADDR pc)
697 {
698 struct btrace_line_range range;
699 struct linetable_entry *lines;
700 struct linetable *ltable;
701 struct symtab *symtab;
702 int nlines, i;
703
704 symtab = find_pc_line_symtab (pc);
705 if (symtab == NULL)
706 return btrace_mk_line_range (NULL, 0, 0);
707
708 ltable = SYMTAB_LINETABLE (symtab);
709 if (ltable == NULL)
710 return btrace_mk_line_range (symtab, 0, 0);
711
712 nlines = ltable->nitems;
713 lines = ltable->item;
714 if (nlines <= 0)
715 return btrace_mk_line_range (symtab, 0, 0);
716
717 range = btrace_mk_line_range (symtab, 0, 0);
718 for (i = 0; i < nlines - 1; i++)
719 {
720 if ((lines[i].pc == pc) && (lines[i].line != 0))
721 range = btrace_line_range_add (range, lines[i].line);
722 }
723
724 return range;
725 }
726
727 /* Print source lines in LINES to UIOUT.
728
729 SRC_AND_ASM_TUPLE and ASM_LIST are the tuple and list emitters for the
730 last source line and the instructions corresponding to that source line.
731 When printing a new source line, we reset the emitters of the previous
732 line and open new ones for the new source line. If the source line range
733 in LINES is not empty, this function leaves the emitters for the last
734 printed source line open so instructions can be added to them. */
735
736 static void
737 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
738 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
739 gdb::optional<ui_out_emit_list> *asm_list,
740 gdb_disassembly_flags flags)
741 {
742 print_source_lines_flags psl_flags;
743
744 if (flags & DISASSEMBLY_FILENAME)
745 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
746
747 for (int line = lines.begin; line < lines.end; ++line)
748 {
749 asm_list->reset ();
750
751 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
752
753 print_source_lines (lines.symtab, line, line + 1, psl_flags);
754
755 asm_list->emplace (uiout, "line_asm_insn");
756 }
757 }
758
759 /* Disassemble a section of the recorded instruction trace. */
760
761 static void
762 btrace_insn_history (struct ui_out *uiout,
763 const struct btrace_thread_info *btinfo,
764 const struct btrace_insn_iterator *begin,
765 const struct btrace_insn_iterator *end,
766 gdb_disassembly_flags flags)
767 {
768 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
769 btrace_insn_number (begin), btrace_insn_number (end));
770
771 flags |= DISASSEMBLY_SPECULATIVE;
772
773 struct gdbarch *gdbarch = target_gdbarch ();
774 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
775
776 ui_out_emit_list list_emitter (uiout, "asm_insns");
777
778 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
779 gdb::optional<ui_out_emit_list> asm_list;
780
781 gdb_pretty_print_disassembler disasm (gdbarch, uiout);
782
783 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
784 btrace_insn_next (&it, 1))
785 {
786 const struct btrace_insn *insn;
787
788 insn = btrace_insn_get (&it);
789
790 /* A NULL instruction indicates a gap in the trace. */
791 if (insn == NULL)
792 {
793 const struct btrace_config *conf;
794
795 conf = btrace_conf (btinfo);
796
797 /* We have trace, so we must have a configuration. */
798 gdb_assert (conf != NULL);
799
800 uiout->field_fmt ("insn-number", "%u",
801 btrace_insn_number (&it));
802 uiout->text ("\t");
803
804 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
805 conf->format);
806 }
807 else
808 {
809 struct disasm_insn dinsn;
810
811 if ((flags & DISASSEMBLY_SOURCE) != 0)
812 {
813 struct btrace_line_range lines;
814
815 lines = btrace_find_line_range (insn->pc);
816 if (!btrace_line_range_is_empty (lines)
817 && !btrace_line_range_contains_range (last_lines, lines))
818 {
819 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
820 flags);
821 last_lines = lines;
822 }
823 else if (!src_and_asm_tuple.has_value ())
824 {
825 gdb_assert (!asm_list.has_value ());
826
827 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
828
829 /* No source information. */
830 asm_list.emplace (uiout, "line_asm_insn");
831 }
832
833 gdb_assert (src_and_asm_tuple.has_value ());
834 gdb_assert (asm_list.has_value ());
835 }
836
837 memset (&dinsn, 0, sizeof (dinsn));
838 dinsn.number = btrace_insn_number (&it);
839 dinsn.addr = insn->pc;
840
841 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
842 dinsn.is_speculative = 1;
843
844 disasm.pretty_print_insn (&dinsn, flags);
845 }
846 }
847 }
848
849 /* The insn_history method of target record-btrace. */
850
851 void
852 record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
853 {
854 struct btrace_thread_info *btinfo;
855 struct btrace_insn_history *history;
856 struct btrace_insn_iterator begin, end;
857 struct ui_out *uiout;
858 unsigned int context, covered;
859
860 uiout = current_uiout;
861 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
862 context = abs (size);
863 if (context == 0)
864 error (_("Bad record instruction-history-size."));
865
866 btinfo = require_btrace ();
867 history = btinfo->insn_history;
868 if (history == NULL)
869 {
870 struct btrace_insn_iterator *replay;
871
872 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
873
874 /* If we're replaying, we start at the replay position. Otherwise, we
875 start at the tail of the trace. */
876 replay = btinfo->replay;
877 if (replay != NULL)
878 begin = *replay;
879 else
880 btrace_insn_end (&begin, btinfo);
881
882 /* We start from here and expand in the requested direction. Then we
883 expand in the other direction, as well, to fill up any remaining
884 context. */
885 end = begin;
886 if (size < 0)
887 {
888 /* We want the current position covered, as well. */
889 covered = btrace_insn_next (&end, 1);
890 covered += btrace_insn_prev (&begin, context - covered);
891 covered += btrace_insn_next (&end, context - covered);
892 }
893 else
894 {
895 covered = btrace_insn_next (&end, context);
896 covered += btrace_insn_prev (&begin, context - covered);
897 }
898 }
899 else
900 {
901 begin = history->begin;
902 end = history->end;
903
904 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
905 btrace_insn_number (&begin), btrace_insn_number (&end));
906
907 if (size < 0)
908 {
909 end = begin;
910 covered = btrace_insn_prev (&begin, context);
911 }
912 else
913 {
914 begin = end;
915 covered = btrace_insn_next (&end, context);
916 }
917 }
918
919 if (covered > 0)
920 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
921 else
922 {
923 if (size < 0)
924 printf_unfiltered (_("At the start of the branch trace record.\n"));
925 else
926 printf_unfiltered (_("At the end of the branch trace record.\n"));
927 }
928
929 btrace_set_insn_history (btinfo, &begin, &end);
930 }
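
/* Example (sketch, assuming enough trace on both sides): replaying at
   instruction 100, a request of size -10 starts with begin == end == 100,
   moves END forward by one to cover the current instruction, then BEGIN
   back by nine, printing the half-open range [91, 101).  */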
931
932 /* The insn_history_range method of target record-btrace. */
933
934 void
935 record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
936 gdb_disassembly_flags flags)
937 {
938 struct btrace_thread_info *btinfo;
939 struct btrace_insn_iterator begin, end;
940 struct ui_out *uiout;
941 unsigned int low, high;
942 int found;
943
944 uiout = current_uiout;
945 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
946 low = from;
947 high = to;
948
949 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
950
951 /* Check for wrap-arounds. */
952 if (low != from || high != to)
953 error (_("Bad range."));
954
955 if (high < low)
956 error (_("Bad range."));
957
958 btinfo = require_btrace ();
959
960 found = btrace_find_insn_by_number (&begin, btinfo, low);
961 if (found == 0)
962 error (_("Range out of bounds."));
963
964 found = btrace_find_insn_by_number (&end, btinfo, high);
965 if (found == 0)
966 {
967 /* Silently truncate the range. */
968 btrace_insn_end (&end, btinfo);
969 }
970 else
971 {
972 /* We want both begin and end to be inclusive. */
973 btrace_insn_next (&end, 1);
974 }
975
976 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
977 btrace_set_insn_history (btinfo, &begin, &end);
978 }
979
980 /* The insn_history_from method of target record-btrace. */
981
982 void
983 record_btrace_target::insn_history_from (ULONGEST from, int size,
984 gdb_disassembly_flags flags)
985 {
986 ULONGEST begin, end, context;
987
988 context = abs (size);
989 if (context == 0)
990 error (_("Bad record instruction-history-size."));
991
992 if (size < 0)
993 {
994 end = from;
995
996 if (from < context)
997 begin = 0;
998 else
999 begin = from - context + 1;
1000 }
1001 else
1002 {
1003 begin = from;
1004 end = from + context - 1;
1005
1006 /* Check for wrap-around. */
1007 if (end < begin)
1008 end = ULONGEST_MAX;
1009 }
1010
1011 insn_history_range (begin, end, flags);
1012 }
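
/* Worked example: FROM == 10 with SIZE == -5 gives CONTEXT == 5 and the
   inclusive range [6, 10]; with SIZE == 5 it would be [10, 14], clamped
   to ULONGEST_MAX on wrap-around.  */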
1013
1014 /* Print the instruction number range for a function call history line. */
1015
1016 static void
1017 btrace_call_history_insn_range (struct ui_out *uiout,
1018 const struct btrace_function *bfun)
1019 {
1020 unsigned int begin, end, size;
1021
1022 size = bfun->insn.size ();
1023 gdb_assert (size > 0);
1024
1025 begin = bfun->insn_offset;
1026 end = begin + size - 1;
1027
1028 uiout->field_unsigned ("insn begin", begin);
1029 uiout->text (",");
1030 uiout->field_unsigned ("insn end", end);
1031 }
1032
1033 /* Compute the lowest and highest source line for the instructions in BFUN
1034 and return them in PBEGIN and PEND.
1035 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1036 result from inlining or macro expansion. */
1037
1038 static void
1039 btrace_compute_src_line_range (const struct btrace_function *bfun,
1040 int *pbegin, int *pend)
1041 {
1042 struct symtab *symtab;
1043 struct symbol *sym;
1044 int begin, end;
1045
1046 begin = INT_MAX;
1047 end = INT_MIN;
1048
1049 sym = bfun->sym;
1050 if (sym == NULL)
1051 goto out;
1052
1053 symtab = symbol_symtab (sym);
1054
1055 for (const btrace_insn &insn : bfun->insn)
1056 {
1057 struct symtab_and_line sal;
1058
1059 sal = find_pc_line (insn.pc, 0);
1060 if (sal.symtab != symtab || sal.line == 0)
1061 continue;
1062
1063 begin = std::min (begin, sal.line);
1064 end = std::max (end, sal.line);
1065 }
1066
1067 out:
1068 *pbegin = begin;
1069 *pend = end;
1070 }
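
/* If no instruction in BFUN maps to its symtab, *PBEGIN stays INT_MAX
   and *PEND stays INT_MIN, so callers can detect the empty result via
   *PEND < *PBEGIN, as btrace_call_history_src_line below does.  */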
1071
1072 /* Print the source line information for a function call history line. */
1073
1074 static void
1075 btrace_call_history_src_line (struct ui_out *uiout,
1076 const struct btrace_function *bfun)
1077 {
1078 struct symbol *sym;
1079 int begin, end;
1080
1081 sym = bfun->sym;
1082 if (sym == NULL)
1083 return;
1084
1085 uiout->field_string ("file",
1086 symtab_to_filename_for_display (symbol_symtab (sym)),
1087 ui_out_style_kind::FILE);
1088
1089 btrace_compute_src_line_range (bfun, &begin, &end);
1090 if (end < begin)
1091 return;
1092
1093 uiout->text (":");
1094 uiout->field_signed ("min line", begin);
1095
1096 if (end == begin)
1097 return;
1098
1099 uiout->text (",");
1100 uiout->field_signed ("max line", end);
1101 }
1102
1103 /* Get the name of a branch trace function. */
1104
1105 static const char *
1106 btrace_get_bfun_name (const struct btrace_function *bfun)
1107 {
1108 struct minimal_symbol *msym;
1109 struct symbol *sym;
1110
1111 if (bfun == NULL)
1112 return "??";
1113
1114 msym = bfun->msym;
1115 sym = bfun->sym;
1116
1117 if (sym != NULL)
1118 return SYMBOL_PRINT_NAME (sym);
1119 else if (msym != NULL)
1120 return MSYMBOL_PRINT_NAME (msym);
1121 else
1122 return "??";
1123 }
1124
1125 /* Print a section of the recorded function-call trace. */
1126
1127 static void
1128 btrace_call_history (struct ui_out *uiout,
1129 const struct btrace_thread_info *btinfo,
1130 const struct btrace_call_iterator *begin,
1131 const struct btrace_call_iterator *end,
1132 int int_flags)
1133 {
1134 struct btrace_call_iterator it;
1135 record_print_flags flags = (enum record_print_flag) int_flags;
1136
1137 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1138 btrace_call_number (end));
1139
1140 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1141 {
1142 const struct btrace_function *bfun;
1143 struct minimal_symbol *msym;
1144 struct symbol *sym;
1145
1146 bfun = btrace_call_get (&it);
1147 sym = bfun->sym;
1148 msym = bfun->msym;
1149
1150 /* Print the function index. */
1151 uiout->field_unsigned ("index", bfun->number);
1152 uiout->text ("\t");
1153
1154 /* Indicate gaps in the trace. */
1155 if (bfun->errcode != 0)
1156 {
1157 const struct btrace_config *conf;
1158
1159 conf = btrace_conf (btinfo);
1160
1161 /* We have trace, so we must have a configuration. */
1162 gdb_assert (conf != NULL);
1163
1164 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1165
1166 continue;
1167 }
1168
1169 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1170 {
1171 int level = bfun->level + btinfo->level, i;
1172
1173 for (i = 0; i < level; ++i)
1174 uiout->text (" ");
1175 }
1176
1177 if (sym != NULL)
1178 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
1179 ui_out_style_kind::FUNCTION);
1180 else if (msym != NULL)
1181 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
1182 ui_out_style_kind::FUNCTION);
1183 else if (!uiout->is_mi_like_p ())
1184 uiout->field_string ("function", "??",
1185 ui_out_style_kind::FUNCTION);
1186
1187 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1188 {
1189 uiout->text (_("\tinst "));
1190 btrace_call_history_insn_range (uiout, bfun);
1191 }
1192
1193 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1194 {
1195 uiout->text (_("\tat "));
1196 btrace_call_history_src_line (uiout, bfun);
1197 }
1198
1199 uiout->text ("\n");
1200 }
1201 }
1202
1203 /* The call_history method of target record-btrace. */
1204
1205 void
1206 record_btrace_target::call_history (int size, record_print_flags flags)
1207 {
1208 struct btrace_thread_info *btinfo;
1209 struct btrace_call_history *history;
1210 struct btrace_call_iterator begin, end;
1211 struct ui_out *uiout;
1212 unsigned int context, covered;
1213
1214 uiout = current_uiout;
1215 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
1216 context = abs (size);
1217 if (context == 0)
1218 error (_("Bad record function-call-history-size."));
1219
1220 btinfo = require_btrace ();
1221 history = btinfo->call_history;
1222 if (history == NULL)
1223 {
1224 struct btrace_insn_iterator *replay;
1225
1226 DEBUG ("call-history (0x%x): %d", (int) flags, size);
1227
1228 /* If we're replaying, we start at the replay position. Otherwise, we
1229 start at the tail of the trace. */
1230 replay = btinfo->replay;
1231 if (replay != NULL)
1232 {
1233 begin.btinfo = btinfo;
1234 begin.index = replay->call_index;
1235 }
1236 else
1237 btrace_call_end (&begin, btinfo);
1238
1239 /* We start from here and expand in the requested direction. Then we
1240 expand in the other direction, as well, to fill up any remaining
1241 context. */
1242 end = begin;
1243 if (size < 0)
1244 {
1245 /* We want the current position covered, as well. */
1246 covered = btrace_call_next (&end, 1);
1247 covered += btrace_call_prev (&begin, context - covered);
1248 covered += btrace_call_next (&end, context - covered);
1249 }
1250 else
1251 {
1252 covered = btrace_call_next (&end, context);
1253 covered += btrace_call_prev (&begin, context - covered);
1254 }
1255 }
1256 else
1257 {
1258 begin = history->begin;
1259 end = history->end;
1260
1261 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
1262 btrace_call_number (&begin), btrace_call_number (&end));
1263
1264 if (size < 0)
1265 {
1266 end = begin;
1267 covered = btrace_call_prev (&begin, context);
1268 }
1269 else
1270 {
1271 begin = end;
1272 covered = btrace_call_next (&end, context);
1273 }
1274 }
1275
1276 if (covered > 0)
1277 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1278 else
1279 {
1280 if (size < 0)
1281 printf_unfiltered (_("At the start of the branch trace record.\n"));
1282 else
1283 printf_unfiltered (_("At the end of the branch trace record.\n"));
1284 }
1285
1286 btrace_set_call_history (btinfo, &begin, &end);
1287 }
1288
1289 /* The call_history_range method of target record-btrace. */
1290
1291 void
1292 record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1293 record_print_flags flags)
1294 {
1295 struct btrace_thread_info *btinfo;
1296 struct btrace_call_iterator begin, end;
1297 struct ui_out *uiout;
1298 unsigned int low, high;
1299 int found;
1300
1301 uiout = current_uiout;
1302 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1303 low = from;
1304 high = to;
1305
1306 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1307
1308 /* Check for wrap-arounds. */
1309 if (low != from || high != to)
1310 error (_("Bad range."));
1311
1312 if (high < low)
1313 error (_("Bad range."));
1314
1315 btinfo = require_btrace ();
1316
1317 found = btrace_find_call_by_number (&begin, btinfo, low);
1318 if (found == 0)
1319 error (_("Range out of bounds."));
1320
1321 found = btrace_find_call_by_number (&end, btinfo, high);
1322 if (found == 0)
1323 {
1324 /* Silently truncate the range. */
1325 btrace_call_end (&end, btinfo);
1326 }
1327 else
1328 {
1329 /* We want both begin and end to be inclusive. */
1330 btrace_call_next (&end, 1);
1331 }
1332
1333 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1334 btrace_set_call_history (btinfo, &begin, &end);
1335 }
1336
1337 /* The call_history_from method of target record-btrace. */
1338
1339 void
1340 record_btrace_target::call_history_from (ULONGEST from, int size,
1341 record_print_flags flags)
1342 {
1343 ULONGEST begin, end, context;
1344
1345 context = abs (size);
1346 if (context == 0)
1347 error (_("Bad record function-call-history-size."));
1348
1349 if (size < 0)
1350 {
1351 end = from;
1352
1353 if (from < context)
1354 begin = 0;
1355 else
1356 begin = from - context + 1;
1357 }
1358 else
1359 {
1360 begin = from;
1361 end = from + context - 1;
1362
1363 /* Check for wrap-around. */
1364 if (end < begin)
1365 end = ULONGEST_MAX;
1366 }
1367
1368 call_history_range (begin, end, flags);
1369 }
1370
1371 /* The record_method method of target record-btrace. */
1372
1373 enum record_method
1374 record_btrace_target::record_method (ptid_t ptid)
1375 {
1376 struct thread_info * const tp = find_thread_ptid (ptid);
1377
1378 if (tp == NULL)
1379 error (_("No thread."));
1380
1381 if (tp->btrace.target == NULL)
1382 return RECORD_METHOD_NONE;
1383
1384 return RECORD_METHOD_BTRACE;
1385 }
1386
1387 /* The record_is_replaying method of target record-btrace. */
1388
1389 bool
1390 record_btrace_target::record_is_replaying (ptid_t ptid)
1391 {
1392 for (thread_info *tp : all_non_exited_threads (ptid))
1393 if (btrace_is_replaying (tp))
1394 return true;
1395
1396 return false;
1397 }
1398
1399 /* The record_will_replay method of target record-btrace. */
1400
1401 bool
1402 record_btrace_target::record_will_replay (ptid_t ptid, int dir)
1403 {
1404 return dir == EXEC_REVERSE || record_is_replaying (ptid);
1405 }
1406
1407 /* The xfer_partial method of target record-btrace. */
1408
1409 enum target_xfer_status
1410 record_btrace_target::xfer_partial (enum target_object object,
1411 const char *annex, gdb_byte *readbuf,
1412 const gdb_byte *writebuf, ULONGEST offset,
1413 ULONGEST len, ULONGEST *xfered_len)
1414 {
1415 /* Filter out requests that don't make sense during replay. */
1416 if (replay_memory_access == replay_memory_access_read_only
1417 && !record_btrace_generating_corefile
1418 && record_is_replaying (inferior_ptid))
1419 {
1420 switch (object)
1421 {
1422 case TARGET_OBJECT_MEMORY:
1423 {
1424 struct target_section *section;
1425
1426 /* We do not allow writing memory in general. */
1427 if (writebuf != NULL)
1428 {
1429 *xfered_len = len;
1430 return TARGET_XFER_UNAVAILABLE;
1431 }
1432
1433 /* We allow reading readonly memory. */
1434 section = target_section_by_addr (this, offset);
1435 if (section != NULL)
1436 {
1437 /* Check if the section we found is readonly. */
1438 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1439 section->the_bfd_section)
1440 & SEC_READONLY) != 0)
1441 {
1442 /* Truncate the request to fit into this section. */
1443 len = std::min (len, section->endaddr - offset);
1444 break;
1445 }
1446 }
1447
1448 *xfered_len = len;
1449 return TARGET_XFER_UNAVAILABLE;
1450 }
1451 }
1452 }
1453
1454 /* Forward the request. */
1455 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1456 offset, len, xfered_len);
1457 }
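
/* For example, while replaying with replay-memory-access "read-only",
   a memory write reports TARGET_XFER_UNAVAILABLE, whereas a read that
   falls within a SEC_READONLY section is forwarded to the target
   beneath (truncated to the section's end).  */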
1458
1459 /* The insert_breakpoint method of target record-btrace. */
1460
1461 int
1462 record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1463 struct bp_target_info *bp_tgt)
1464 {
1465 const char *old;
1466 int ret;
1467
1468 /* Inserting breakpoints requires accessing memory. Allow it for the
1469 duration of this function. */
1470 old = replay_memory_access;
1471 replay_memory_access = replay_memory_access_read_write;
1472
1473 ret = 0;
1474 try
1475 {
1476 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
1477 }
1478 catch (const gdb_exception &except)
1479 {
1480 replay_memory_access = old;
1481 throw;
1482 }
1483 replay_memory_access = old;
1484
1485 return ret;
1486 }
1487
1488 /* The remove_breakpoint method of target record-btrace. */
1489
1490 int
1491 record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1492 struct bp_target_info *bp_tgt,
1493 enum remove_bp_reason reason)
1494 {
1495 const char *old;
1496 int ret;
1497
1498 /* Removing breakpoints requires accessing memory. Allow it for the
1499 duration of this function. */
1500 old = replay_memory_access;
1501 replay_memory_access = replay_memory_access_read_write;
1502
1503 ret = 0;
1504 try
1505 {
1506 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
1507 }
1508 catch (const gdb_exception &except)
1509 {
1510 replay_memory_access = old;
1511 throw;
1512 }
1513 replay_memory_access = old;
1514
1515 return ret;
1516 }
1517
1518 /* The fetch_registers method of target record-btrace. */
1519
1520 void
1521 record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1522 {
1523 struct btrace_insn_iterator *replay;
1524 struct thread_info *tp;
1525
1526 tp = find_thread_ptid (regcache->ptid ());
1527 gdb_assert (tp != NULL);
1528
1529 replay = tp->btrace.replay;
1530 if (replay != NULL && !record_btrace_generating_corefile)
1531 {
1532 const struct btrace_insn *insn;
1533 struct gdbarch *gdbarch;
1534 int pcreg;
1535
1536 gdbarch = regcache->arch ();
1537 pcreg = gdbarch_pc_regnum (gdbarch);
1538 if (pcreg < 0)
1539 return;
1540
1541 /* We can only provide the PC register. */
1542 if (regno >= 0 && regno != pcreg)
1543 return;
1544
1545 insn = btrace_insn_get (replay);
1546 gdb_assert (insn != NULL);
1547
1548 regcache->raw_supply (pcreg, &insn->pc);
1549 }
1550 else
1551 this->beneath ()->fetch_registers (regcache, regno);
1552 }
1553
1554 /* The store_registers method of target record-btrace. */
1555
1556 void
1557 record_btrace_target::store_registers (struct regcache *regcache, int regno)
1558 {
1559 if (!record_btrace_generating_corefile
1560 && record_is_replaying (regcache->ptid ()))
1561 error (_("Cannot write registers while replaying."));
1562
1563 gdb_assert (may_write_registers);
1564
1565 this->beneath ()->store_registers (regcache, regno);
1566 }
1567
1568 /* The prepare_to_store method of target record-btrace. */
1569
1570 void
1571 record_btrace_target::prepare_to_store (struct regcache *regcache)
1572 {
1573 if (!record_btrace_generating_corefile
1574 && record_is_replaying (regcache->ptid ()))
1575 return;
1576
1577 this->beneath ()->prepare_to_store (regcache);
1578 }
1579
1580 /* The branch trace frame cache. */
1581
1582 struct btrace_frame_cache
1583 {
1584 /* The thread. */
1585 struct thread_info *tp;
1586
1587 /* The frame info. */
1588 struct frame_info *frame;
1589
1590 /* The branch trace function segment. */
1591 const struct btrace_function *bfun;
1592 };
1593
1594 /* A struct btrace_frame_cache hash table indexed by FRAME. */
1595
1596 static htab_t bfcache;
1597
1598 /* hash_f for htab_create_alloc of bfcache. */
1599
1600 static hashval_t
1601 bfcache_hash (const void *arg)
1602 {
1603 const struct btrace_frame_cache *cache
1604 = (const struct btrace_frame_cache *) arg;
1605
1606 return htab_hash_pointer (cache->frame);
1607 }
1608
1609 /* eq_f for htab_create_alloc of bfcache. */
1610
1611 static int
1612 bfcache_eq (const void *arg1, const void *arg2)
1613 {
1614 const struct btrace_frame_cache *cache1
1615 = (const struct btrace_frame_cache *) arg1;
1616 const struct btrace_frame_cache *cache2
1617 = (const struct btrace_frame_cache *) arg2;
1618
1619 return cache1->frame == cache2->frame;
1620 }
1621
1622 /* Create a new btrace frame cache. */
1623
1624 static struct btrace_frame_cache *
1625 bfcache_new (struct frame_info *frame)
1626 {
1627 struct btrace_frame_cache *cache;
1628 void **slot;
1629
1630 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1631 cache->frame = frame;
1632
1633 slot = htab_find_slot (bfcache, cache, INSERT);
1634 gdb_assert (*slot == NULL);
1635 *slot = cache;
1636
1637 return cache;
1638 }
1639
1640 /* Extract the branch trace function from a branch trace frame. */
1641
1642 static const struct btrace_function *
1643 btrace_get_frame_function (struct frame_info *frame)
1644 {
1645 const struct btrace_frame_cache *cache;
1646 struct btrace_frame_cache pattern;
1647 void **slot;
1648
1649 pattern.frame = frame;
1650
1651 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1652 if (slot == NULL)
1653 return NULL;
1654
1655 cache = (const struct btrace_frame_cache *) *slot;
1656 return cache->bfun;
1657 }
1658
1659 /* Implement stop_reason method for record_btrace_frame_unwind. */
1660
1661 static enum unwind_stop_reason
1662 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1663 void **this_cache)
1664 {
1665 const struct btrace_frame_cache *cache;
1666 const struct btrace_function *bfun;
1667
1668 cache = (const struct btrace_frame_cache *) *this_cache;
1669 bfun = cache->bfun;
1670 gdb_assert (bfun != NULL);
1671
1672 if (bfun->up == 0)
1673 return UNWIND_UNAVAILABLE;
1674
1675 return UNWIND_NO_REASON;
1676 }
1677
1678 /* Implement this_id method for record_btrace_frame_unwind. */
1679
1680 static void
1681 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1682 struct frame_id *this_id)
1683 {
1684 const struct btrace_frame_cache *cache;
1685 const struct btrace_function *bfun;
1686 struct btrace_call_iterator it;
1687 CORE_ADDR code, special;
1688
1689 cache = (const struct btrace_frame_cache *) *this_cache;
1690
1691 bfun = cache->bfun;
1692 gdb_assert (bfun != NULL);
1693
1694 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1695 bfun = btrace_call_get (&it);
1696
1697 code = get_frame_func (this_frame);
1698 special = bfun->number;
1699
1700 *this_id = frame_id_build_unavailable_stack_special (code, special);
1701
1702 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1703 btrace_get_bfun_name (cache->bfun),
1704 core_addr_to_string_nz (this_id->code_addr),
1705 core_addr_to_string_nz (this_id->special_addr));
1706 }
1707
1708 /* Implement prev_register method for record_btrace_frame_unwind. */
1709
1710 static struct value *
1711 record_btrace_frame_prev_register (struct frame_info *this_frame,
1712 void **this_cache,
1713 int regnum)
1714 {
1715 const struct btrace_frame_cache *cache;
1716 const struct btrace_function *bfun, *caller;
1717 struct btrace_call_iterator it;
1718 struct gdbarch *gdbarch;
1719 CORE_ADDR pc;
1720 int pcreg;
1721
1722 gdbarch = get_frame_arch (this_frame);
1723 pcreg = gdbarch_pc_regnum (gdbarch);
1724 if (pcreg < 0 || regnum != pcreg)
1725 throw_error (NOT_AVAILABLE_ERROR,
1726 _("Registers are not available in btrace record history"));
1727
1728 cache = (const struct btrace_frame_cache *) *this_cache;
1729 bfun = cache->bfun;
1730 gdb_assert (bfun != NULL);
1731
1732 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
1733 throw_error (NOT_AVAILABLE_ERROR,
1734 _("No caller in btrace record history"));
1735
1736 caller = btrace_call_get (&it);
1737
1738 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1739 pc = caller->insn.front ().pc;
1740 else
1741 {
1742 pc = caller->insn.back ().pc;
1743 pc += gdb_insn_length (gdbarch, pc);
1744 }
1745
1746 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1747 btrace_get_bfun_name (bfun), bfun->level,
1748 core_addr_to_string_nz (pc));
1749
1750 return frame_unwind_got_address (this_frame, regnum, pc);
1751 }
1752
1753 /* Implement sniffer method for record_btrace_frame_unwind. */
1754
1755 static int
1756 record_btrace_frame_sniffer (const struct frame_unwind *self,
1757 struct frame_info *this_frame,
1758 void **this_cache)
1759 {
1760 const struct btrace_function *bfun;
1761 struct btrace_frame_cache *cache;
1762 struct thread_info *tp;
1763 struct frame_info *next;
1764
1765 /* THIS_FRAME does not contain a reference to its thread. */
1766 tp = inferior_thread ();
1767
1768 bfun = NULL;
1769 next = get_next_frame (this_frame);
1770 if (next == NULL)
1771 {
1772 const struct btrace_insn_iterator *replay;
1773
1774 replay = tp->btrace.replay;
1775 if (replay != NULL)
1776 bfun = &replay->btinfo->functions[replay->call_index];
1777 }
1778 else
1779 {
1780 const struct btrace_function *callee;
1781 struct btrace_call_iterator it;
1782
1783 callee = btrace_get_frame_function (next);
1784 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1785 return 0;
1786
1787 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1788 return 0;
1789
1790 bfun = btrace_call_get (&it);
1791 }
1792
1793 if (bfun == NULL)
1794 return 0;
1795
1796 DEBUG ("[frame] sniffed frame for %s on level %d",
1797 btrace_get_bfun_name (bfun), bfun->level);
1798
1799 /* This is our frame. Initialize the frame cache. */
1800 cache = bfcache_new (this_frame);
1801 cache->tp = tp;
1802 cache->bfun = bfun;
1803
1804 *this_cache = cache;
1805 return 1;
1806 }
1807
1808 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1809
1810 static int
1811 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1812 struct frame_info *this_frame,
1813 void **this_cache)
1814 {
1815 const struct btrace_function *bfun, *callee;
1816 struct btrace_frame_cache *cache;
1817 struct btrace_call_iterator it;
1818 struct frame_info *next;
1819 struct thread_info *tinfo;
1820
1821 next = get_next_frame (this_frame);
1822 if (next == NULL)
1823 return 0;
1824
1825 callee = btrace_get_frame_function (next);
1826 if (callee == NULL)
1827 return 0;
1828
1829 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1830 return 0;
1831
1832 tinfo = inferior_thread ();
1833 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1834 return 0;
1835
1836 bfun = btrace_call_get (&it);
1837
1838 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1839 btrace_get_bfun_name (bfun), bfun->level);
1840
1841 /* This is our frame. Initialize the frame cache. */
1842 cache = bfcache_new (this_frame);
1843 cache->tp = tinfo;
1844 cache->bfun = bfun;
1845
1846 *this_cache = cache;
1847 return 1;
1848 }
1849
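/* Implement the dealloc_cache method shared by record_btrace_frame_unwind
   and record_btrace_tailcall_frame_unwind below: drop the frame's entry
   from BFCACHE.  */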
1850 static void
1851 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1852 {
1853 struct btrace_frame_cache *cache;
1854 void **slot;
1855
1856 cache = (struct btrace_frame_cache *) this_cache;
1857
1858 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1859 gdb_assert (slot != NULL);
1860
1861 htab_remove_elt (bfcache, cache);
1862 }
1863
1864 /* btrace recording stores neither previous memory content nor the
1865 content of the stack frames. Any unwinding would return erroneous
1866 results as the stack contents no longer match the changed PC value
1867 restored from history. Therefore this unwinder reports any possibly
1868 unwound registers as <unavailable>. */
1869
1870 const struct frame_unwind record_btrace_frame_unwind =
1871 {
1872 NORMAL_FRAME,
1873 record_btrace_frame_unwind_stop_reason,
1874 record_btrace_frame_this_id,
1875 record_btrace_frame_prev_register,
1876 NULL,
1877 record_btrace_frame_sniffer,
1878 record_btrace_frame_dealloc_cache
1879 };
1880
1881 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1882 {
1883 TAILCALL_FRAME,
1884 record_btrace_frame_unwind_stop_reason,
1885 record_btrace_frame_this_id,
1886 record_btrace_frame_prev_register,
1887 NULL,
1888 record_btrace_tailcall_frame_sniffer,
1889 record_btrace_frame_dealloc_cache
1890 };
1891
1892 /* Implement the get_unwinder method. */
1893
1894 const struct frame_unwind *
1895 record_btrace_target::get_unwinder ()
1896 {
1897 return &record_btrace_frame_unwind;
1898 }
1899
1900 /* Implement the get_tailcall_unwinder method. */
1901
1902 const struct frame_unwind *
1903 record_btrace_target::get_tailcall_unwinder ()
1904 {
1905 return &record_btrace_tailcall_frame_unwind;
1906 }
1907
1908 /* Return a human-readable string for FLAG. */
1909
1910 static const char *
1911 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1912 {
1913 switch (flag)
1914 {
1915 case BTHR_STEP:
1916 return "step";
1917
1918 case BTHR_RSTEP:
1919 return "reverse-step";
1920
1921 case BTHR_CONT:
1922 return "cont";
1923
1924 case BTHR_RCONT:
1925 return "reverse-cont";
1926
1927 case BTHR_STOP:
1928 return "stop";
1929 }
1930
1931 return "<invalid>";
1932 }
1933
1934 /* Indicate that TP should be resumed according to FLAG. */
1935
1936 static void
1937 record_btrace_resume_thread (struct thread_info *tp,
1938 enum btrace_thread_flag flag)
1939 {
1940 struct btrace_thread_info *btinfo;
1941
1942 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1943 target_pid_to_str (tp->ptid).c_str (), flag,
1944 btrace_thread_flag_to_str (flag));
1945
1946 btinfo = &tp->btrace;
1947
1948 /* Fetch the latest branch trace. */
1949 btrace_fetch (tp, record_btrace_get_cpu ());
1950
1951 /* A resume request overwrites a preceding resume or stop request. */
1952 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1953 btinfo->flags |= flag;
1954 }
1955
1956 /* Return the frame id of TP's current frame. */
1957
1958 static struct frame_id
1959 get_thread_current_frame_id (struct thread_info *tp)
1960 {
1961 struct frame_id id;
1962 int executing;
1963
1964 /* Set current thread, which is implicitly used by
1965 get_current_frame. */
1966 scoped_restore_current_thread restore_thread;
1967
1968 switch_to_thread (tp);
1969
1970 /* Clear the executing flag to allow changes to the current frame.
1971 We are not actually running, yet. We just started a reverse execution
1972 command or a record goto command.
1973 For the latter, EXECUTING is false and this has no effect.
1974 For the former, EXECUTING is true and we're in wait, about to
1975 move the thread. Since we need to recompute the stack, we temporarily
1976 set EXECUTING to false. */
1977 executing = tp->executing;
1978 set_executing (inferior_ptid, false);
1979
1980 id = null_frame_id;
1981 try
1982 {
1983 id = get_frame_id (get_current_frame ());
1984 }
1985 catch (const gdb_exception &except)
1986 {
1987 /* Restore the previous execution state. */
1988 set_executing (inferior_ptid, executing);
1989
1990 throw;
1991 }
1992
1993 /* Restore the previous execution state. */
1994 set_executing (inferior_ptid, executing);
1995
1996 return id;
1997 }
1998
1999 /* Start replaying a thread. */
2000
2001 static struct btrace_insn_iterator *
2002 record_btrace_start_replaying (struct thread_info *tp)
2003 {
2004 struct btrace_insn_iterator *replay;
2005 struct btrace_thread_info *btinfo;
2006
2007 btinfo = &tp->btrace;
2008 replay = NULL;
2009
2010 /* We can't start replaying without trace. */
2011 if (btinfo->functions.empty ())
2012 return NULL;
2013
2014 /* GDB stores the current frame_id when stepping in order to detect steps
2015 into subroutines.
2016 Since frames are computed differently when we're replaying, we need to
2017 recompute those stored frames and fix them up so we can still detect
2018 subroutines after we started replaying. */
2019 try
2020 {
2021 struct frame_id frame_id;
2022 int upd_step_frame_id, upd_step_stack_frame_id;
2023
2024 /* The current frame without replaying - computed via normal unwind. */
2025 frame_id = get_thread_current_frame_id (tp);
2026
2027 /* Check if we need to update any stepping-related frame id's. */
2028 upd_step_frame_id = frame_id_eq (frame_id,
2029 tp->control.step_frame_id);
2030 upd_step_stack_frame_id = frame_id_eq (frame_id,
2031 tp->control.step_stack_frame_id);
2032
2033 /* We start replaying at the end of the branch trace. This corresponds
2034 to the current instruction. */
2035 replay = XNEW (struct btrace_insn_iterator);
2036 btrace_insn_end (replay, btinfo);
2037
2038 /* Skip gaps at the end of the trace. */
2039 while (btrace_insn_get (replay) == NULL)
2040 {
2041 unsigned int steps;
2042
2043 steps = btrace_insn_prev (replay, 1);
2044 if (steps == 0)
2045 error (_("No trace."));
2046 }
2047
2048 /* We're not replaying, yet. */
2049 gdb_assert (btinfo->replay == NULL);
2050 btinfo->replay = replay;
2051
2052 /* Make sure we're not using any stale registers. */
2053 registers_changed_thread (tp);
2054
2055 /* The current frame with replaying - computed via btrace unwind. */
2056 frame_id = get_thread_current_frame_id (tp);
2057
2058 /* Replace stepping related frames where necessary. */
2059 if (upd_step_frame_id)
2060 tp->control.step_frame_id = frame_id;
2061 if (upd_step_stack_frame_id)
2062 tp->control.step_stack_frame_id = frame_id;
2063 }
2064 catch (const gdb_exception &except)
2065 {
2066 xfree (btinfo->replay);
2067 btinfo->replay = NULL;
2068
2069 registers_changed_thread (tp);
2070
2071 throw;
2072 }
2073
2074 return replay;
2075 }
2076
2077 /* Stop replaying a thread. */
2078
2079 static void
2080 record_btrace_stop_replaying (struct thread_info *tp)
2081 {
2082 struct btrace_thread_info *btinfo;
2083
2084 btinfo = &tp->btrace;
2085
2086 xfree (btinfo->replay);
2087 btinfo->replay = NULL;
2088
2089 /* Make sure we're not leaving any stale registers. */
2090 registers_changed_thread (tp);
2091 }
2092
2093 /* Stop replaying TP if it is at the end of its execution history. */
2094
2095 static void
2096 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2097 {
2098 struct btrace_insn_iterator *replay, end;
2099 struct btrace_thread_info *btinfo;
2100
2101 btinfo = &tp->btrace;
2102 replay = btinfo->replay;
2103
2104 if (replay == NULL)
2105 return;
2106
2107 btrace_insn_end (&end, btinfo);
2108
2109 if (btrace_insn_cmp (replay, &end) == 0)
2110 record_btrace_stop_replaying (tp);
2111 }
2112
2113 /* The resume method of target record-btrace. */
2114
2115 void
2116 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2117 {
2118 enum btrace_thread_flag flag, cflag;
2119
2120 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2121 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2122 step ? "step" : "cont");
2123
2124 /* Store the execution direction of the last resume.
2125
2126 If there is more than one resume call, we have to rely on infrun
2127 to not change the execution direction in-between. */
2128 record_btrace_resume_exec_dir = ::execution_direction;
2129
2130 /* As long as we're not replaying, just forward the request.
2131
2132 For non-stop targets this means that no thread is replaying. In order to
2133 make progress, we may need to explicitly move replaying threads to the end
2134 of their execution history. */
2135 if ((::execution_direction != EXEC_REVERSE)
2136 && !record_is_replaying (minus_one_ptid))
2137 {
2138 this->beneath ()->resume (ptid, step, signal);
2139 return;
2140 }
2141
2142 /* Compute the btrace thread flag for the requested move. */
2143 if (::execution_direction == EXEC_REVERSE)
2144 {
2145 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2146 cflag = BTHR_RCONT;
2147 }
2148 else
2149 {
2150 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2151 cflag = BTHR_CONT;
2152 }
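
/* To summarize the mapping (illustrative): forward stepping yields
   BTHR_STEP and forward continuing yields BTHR_CONT; their reverse
   counterparts are BTHR_RSTEP and BTHR_RCONT.  CFLAG is the flag used
   below for threads that are merely continued on all-stop targets.  */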
2153
2154 /* We just indicate the resume intent here. The actual stepping happens in
2155 record_btrace_wait below.
2156
2157 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2158 if (!target_is_non_stop_p ())
2159 {
2160 gdb_assert (inferior_ptid.matches (ptid));
2161
2162 for (thread_info *tp : all_non_exited_threads (ptid))
2163 {
2164 if (tp->ptid.matches (inferior_ptid))
2165 record_btrace_resume_thread (tp, flag);
2166 else
2167 record_btrace_resume_thread (tp, cflag);
2168 }
2169 }
2170 else
2171 {
2172 for (thread_info *tp : all_non_exited_threads (ptid))
2173 record_btrace_resume_thread (tp, flag);
2174 }
2175
2176 /* Async support. */
2177 if (target_can_async_p ())
2178 {
2179 target_async (1);
2180 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2181 }
2182 }
2183
2184 /* The commit_resume method of target record-btrace. */
2185
2186 void
2187 record_btrace_target::commit_resume ()
2188 {
2189 if ((::execution_direction != EXEC_REVERSE)
2190 && !record_is_replaying (minus_one_ptid))
2191 beneath ()->commit_resume ();
2192 }
2193
2194 /* Cancel resuming TP. */
2195
2196 static void
2197 record_btrace_cancel_resume (struct thread_info *tp)
2198 {
2199 enum btrace_thread_flag flags;
2200
2201 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2202 if (flags == 0)
2203 return;
2204
2205 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2206 print_thread_id (tp),
2207 target_pid_to_str (tp->ptid).c_str (), flags,
2208 btrace_thread_flag_to_str (flags));
2209
2210 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2211 record_btrace_stop_replaying_at_end (tp);
2212 }
2213
2214 /* Return a target_waitstatus indicating that we ran out of history. */
2215
2216 static struct target_waitstatus
2217 btrace_step_no_history (void)
2218 {
2219 struct target_waitstatus status;
2220
2221 status.kind = TARGET_WAITKIND_NO_HISTORY;
2222
2223 return status;
2224 }
2225
2226 /* Return a target_waitstatus indicating that a step finished. */
2227
2228 static struct target_waitstatus
2229 btrace_step_stopped (void)
2230 {
2231 struct target_waitstatus status;
2232
2233 status.kind = TARGET_WAITKIND_STOPPED;
2234 status.value.sig = GDB_SIGNAL_TRAP;
2235
2236 return status;
2237 }
2238
2239 /* Return a target_waitstatus indicating that a thread was stopped as
2240 requested. */
2241
2242 static struct target_waitstatus
2243 btrace_step_stopped_on_request (void)
2244 {
2245 struct target_waitstatus status;
2246
2247 status.kind = TARGET_WAITKIND_STOPPED;
2248 status.value.sig = GDB_SIGNAL_0;
2249
2250 return status;
2251 }
2252
2253 /* Return a target_waitstatus indicating a spurious stop. */
2254
2255 static struct target_waitstatus
2256 btrace_step_spurious (void)
2257 {
2258 struct target_waitstatus status;
2259
2260 status.kind = TARGET_WAITKIND_SPURIOUS;
2261
2262 return status;
2263 }
2264
2265 /* Return a target_waitstatus indicating that the thread was not resumed. */
2266
2267 static struct target_waitstatus
2268 btrace_step_no_resumed (void)
2269 {
2270 struct target_waitstatus status;
2271
2272 status.kind = TARGET_WAITKIND_NO_RESUMED;
2273
2274 return status;
2275 }
2276
2277 /* Return a target_waitstatus indicating that we should wait again. */
2278
2279 static struct target_waitstatus
2280 btrace_step_again (void)
2281 {
2282 struct target_waitstatus status;
2283
2284 status.kind = TARGET_WAITKIND_IGNORE;
2285
2286 return status;
2287 }
2288
2289 /* Clear the record histories. */
2290
2291 static void
2292 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2293 {
2294 xfree (btinfo->insn_history);
2295 xfree (btinfo->call_history);
2296
2297 btinfo->insn_history = NULL;
2298 btinfo->call_history = NULL;
2299 }
2300
2301 /* Check whether TP's current replay position is at a breakpoint. */
2302
2303 static int
2304 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2305 {
2306 struct btrace_insn_iterator *replay;
2307 struct btrace_thread_info *btinfo;
2308 const struct btrace_insn *insn;
2309
2310 btinfo = &tp->btrace;
2311 replay = btinfo->replay;
2312
2313 if (replay == NULL)
2314 return 0;
2315
2316 insn = btrace_insn_get (replay);
2317 if (insn == NULL)
2318 return 0;
2319
2320 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2321 &btinfo->stop_reason);
2322 }
2323
2324 /* Step one instruction in forward direction. */
2325
2326 static struct target_waitstatus
2327 record_btrace_single_step_forward (struct thread_info *tp)
2328 {
2329 struct btrace_insn_iterator *replay, end, start;
2330 struct btrace_thread_info *btinfo;
2331
2332 btinfo = &tp->btrace;
2333 replay = btinfo->replay;
2334
2335 /* We're done if we're not replaying. */
2336 if (replay == NULL)
2337 return btrace_step_no_history ();
2338
2339 /* Check if we're stepping a breakpoint. */
2340 if (record_btrace_replay_at_breakpoint (tp))
2341 return btrace_step_stopped ();
2342
2343 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2344 jump back to the instruction at which we started. */
2345 start = *replay;
2346 do
2347 {
2348 unsigned int steps;
2349
2350 /* We will bail out here if we continue stepping after reaching the end
2351 of the execution history. */
2352 steps = btrace_insn_next (replay, 1);
2353 if (steps == 0)
2354 {
2355 *replay = start;
2356 return btrace_step_no_history ();
2357 }
2358 }
2359 while (btrace_insn_get (replay) == NULL);
2360
2361 /* Determine the end of the instruction trace. */
2362 btrace_insn_end (&end, btinfo);
2363
2364 /* The execution trace contains (and ends with) the current instruction.
2365 This instruction has not been executed, yet, so the trace really ends
2366 one instruction earlier. */
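/* For example (illustrative): with a recorded history I1, I2, I3, the
   iterator END is at I3, the current and not-yet-executed instruction.
   Stepping forward from I2 therefore already exhausts the replayable
   history.  */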
2367 if (btrace_insn_cmp (replay, &end) == 0)
2368 return btrace_step_no_history ();
2369
2370 return btrace_step_spurious ();
2371 }
2372
2373 /* Step one instruction in backward direction. */
2374
2375 static struct target_waitstatus
2376 record_btrace_single_step_backward (struct thread_info *tp)
2377 {
2378 struct btrace_insn_iterator *replay, start;
2379 struct btrace_thread_info *btinfo;
2380
2381 btinfo = &tp->btrace;
2382 replay = btinfo->replay;
2383
2384 /* Start replaying if we're not already doing so. */
2385 if (replay == NULL)
2386 replay = record_btrace_start_replaying (tp);
2387
2388 /* If we can't step any further, we reached the end of the history.
2389 Skip gaps during replay. If we end up at a gap (at the beginning of
2390 the trace), jump back to the instruction at which we started. */
2391 start = *replay;
2392 do
2393 {
2394 unsigned int steps;
2395
2396 steps = btrace_insn_prev (replay, 1);
2397 if (steps == 0)
2398 {
2399 *replay = start;
2400 return btrace_step_no_history ();
2401 }
2402 }
2403 while (btrace_insn_get (replay) == NULL);
2404
2405 /* Check if we're stepping a breakpoint.
2406
2407 For reverse-stepping, this check is after the step. There is logic in
2408 infrun.c that handles reverse-stepping separately. See, for example,
2409 proceed and adjust_pc_after_break.
2410
2411 This code assumes that for reverse-stepping, PC points to the last
2412 de-executed instruction, whereas for forward-stepping PC points to the
2413 next to-be-executed instruction. */
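/* For example (illustrative): after reverse-stepping an instruction at
   address A, PC is A itself, so a breakpoint at A is reported here;
   when stepping forward, PC is the address of the next instruction to
   execute, and the corresponding check is done before the step.  */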
2414 if (record_btrace_replay_at_breakpoint (tp))
2415 return btrace_step_stopped ();
2416
2417 return btrace_step_spurious ();
2418 }
2419
2420 /* Step a single thread. */
2421
2422 static struct target_waitstatus
2423 record_btrace_step_thread (struct thread_info *tp)
2424 {
2425 struct btrace_thread_info *btinfo;
2426 struct target_waitstatus status;
2427 enum btrace_thread_flag flags;
2428
2429 btinfo = &tp->btrace;
2430
2431 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2432 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2433
2434 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2435 target_pid_to_str (tp->ptid).c_str (), flags,
2436 btrace_thread_flag_to_str (flags));
2437
2438 /* We can't step without an execution history. */
2439 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2440 return btrace_step_no_history ();
2441
2442 switch (flags)
2443 {
2444 default:
2445 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2446
2447 case BTHR_STOP:
2448 return btrace_step_stopped_on_request ();
2449
2450 case BTHR_STEP:
2451 status = record_btrace_single_step_forward (tp);
2452 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2453 break;
2454
2455 return btrace_step_stopped ();
2456
2457 case BTHR_RSTEP:
2458 status = record_btrace_single_step_backward (tp);
2459 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2460 break;
2461
2462 return btrace_step_stopped ();
2463
2464 case BTHR_CONT:
2465 status = record_btrace_single_step_forward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2467 break;
2468
2469 btinfo->flags |= flags;
2470 return btrace_step_again ();
2471
2472 case BTHR_RCONT:
2473 status = record_btrace_single_step_backward (tp);
2474 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2475 break;
2476
2477 btinfo->flags |= flags;
2478 return btrace_step_again ();
2479 }
2480
2481 /* We keep threads moving at the end of their execution history. The wait
2482 method will stop the thread for whom the event is reported. */
2483 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2484 btinfo->flags |= flags;
2485
2486 return status;
2487 }
2488
2489 /* Announce further events if necessary. */
2490
2491 static void
2492 record_btrace_maybe_mark_async_event
2493 (const std::vector<thread_info *> &moving,
2494 const std::vector<thread_info *> &no_history)
2495 {
2496 bool more_moving = !moving.empty ();
2497 bool more_no_history = !no_history.empty ();
2498
2499 if (!more_moving && !more_no_history)
2500 return;
2501
2502 if (more_moving)
2503 DEBUG ("movers pending");
2504
2505 if (more_no_history)
2506 DEBUG ("no-history pending");
2507
2508 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2509 }
2510
2511 /* The wait method of target record-btrace. */
2512
2513 ptid_t
2514 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2515 int options)
2516 {
2517 std::vector<thread_info *> moving;
2518 std::vector<thread_info *> no_history;
2519
2520 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2521
2522 /* As long as we're not replaying, just forward the request. */
2523 if ((::execution_direction != EXEC_REVERSE)
2524 && !record_is_replaying (minus_one_ptid))
2525 {
2526 return this->beneath ()->wait (ptid, status, options);
2527 }
2528
2529 /* Keep a work list of moving threads. */
2530 for (thread_info *tp : all_non_exited_threads (ptid))
2531 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2532 moving.push_back (tp);
2533
2534 if (moving.empty ())
2535 {
2536 *status = btrace_step_no_resumed ();
2537
2538 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2539 target_waitstatus_to_string (status).c_str ());
2540
2541 return null_ptid;
2542 }
2543
2544 /* Step moving threads one by one, one step each, until either one thread
2545 reports an event or we run out of threads to step.
2546
2547 When stepping more than one thread, chances are that some threads reach
2548 the end of their execution history earlier than others. If we reported
2549 this immediately, all-stop on top of non-stop would stop all threads and
2550 resume the same threads next time. And we would report the same thread
2551 having reached the end of its execution history again.
2552
2553 In the worst case, this would starve the other threads. But even if other
2554 threads were allowed to make progress, this would result in far too
2555 many intermediate stops.
2556
2557 We therefore delay the reporting of "no execution history" until we have
2558 nothing else to report. By this time, all threads should have moved to
2559 either the beginning or the end of their execution history. There will
2560 be a single user-visible stop. */
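/* For example (illustrative): when reverse-continuing threads T1 and
   T2 where T1 has the shorter history, T1 reaches the beginning of its
   trace first.  We keep moving T2 and report T1's missing history only
   once no thread can make any more progress.  */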
2561 struct thread_info *eventing = NULL;
2562 while ((eventing == NULL) && !moving.empty ())
2563 {
2564 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2565 {
2566 thread_info *tp = moving[ix];
2567
2568 *status = record_btrace_step_thread (tp);
2569
2570 switch (status->kind)
2571 {
2572 case TARGET_WAITKIND_IGNORE:
2573 ix++;
2574 break;
2575
2576 case TARGET_WAITKIND_NO_HISTORY:
2577 no_history.push_back (ordered_remove (moving, ix));
2578 break;
2579
2580 default:
2581 eventing = unordered_remove (moving, ix);
2582 break;
2583 }
2584 }
2585 }
2586
2587 if (eventing == NULL)
2588 {
2589 /* We started with at least one moving thread. This thread must have
2590 either stopped or reached the end of its execution history.
2591
2592 In the former case, EVENTING must not be NULL.
2593 In the latter case, NO_HISTORY must not be empty. */
2594 gdb_assert (!no_history.empty ());
2595
2596 /* We kept threads moving at the end of their execution history. Stop
2597 EVENTING now that we are going to report its stop. */
2598 eventing = unordered_remove (no_history, 0);
2599 eventing->btrace.flags &= ~BTHR_MOVE;
2600
2601 *status = btrace_step_no_history ();
2602 }
2603
2604 gdb_assert (eventing != NULL);
2605
2606 /* We kept threads replaying at the end of their execution history. Stop
2607 replaying EVENTING now that we are going to report its stop. */
2608 record_btrace_stop_replaying_at_end (eventing);
2609
2610 /* Stop all other threads. */
2611 if (!target_is_non_stop_p ())
2612 {
2613 for (thread_info *tp : all_non_exited_threads ())
2614 record_btrace_cancel_resume (tp);
2615 }
2616
2617 /* In async mode, we need to announce further events. */
2618 if (target_is_async_p ())
2619 record_btrace_maybe_mark_async_event (moving, no_history);
2620
2621 /* Start record histories anew from the current position. */
2622 record_btrace_clear_histories (&eventing->btrace);
2623
2624 /* We moved the replay position but did not update registers. */
2625 registers_changed_thread (eventing);
2626
2627 DEBUG ("wait ended by thread %s (%s): %s",
2628 print_thread_id (eventing),
2629 target_pid_to_str (eventing->ptid).c_str (),
2630 target_waitstatus_to_string (status).c_str ());
2631
2632 return eventing->ptid;
2633 }
2634
2635 /* The stop method of target record-btrace. */
2636
2637 void
2638 record_btrace_target::stop (ptid_t ptid)
2639 {
2640 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2641
2642 /* As long as we're not replaying, just forward the request. */
2643 if ((::execution_direction != EXEC_REVERSE)
2644 && !record_is_replaying (minus_one_ptid))
2645 {
2646 this->beneath ()->stop (ptid);
2647 }
2648 else
2649 {
2650 for (thread_info *tp : all_non_exited_threads (ptid))
2651 {
2652 tp->btrace.flags &= ~BTHR_MOVE;
2653 tp->btrace.flags |= BTHR_STOP;
2654 }
2655 }
2656 }
2657
2658 /* The can_execute_reverse method of target record-btrace. */
2659
2660 bool
2661 record_btrace_target::can_execute_reverse ()
2662 {
2663 return true;
2664 }
2665
2666 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2667
2668 bool
2669 record_btrace_target::stopped_by_sw_breakpoint ()
2670 {
2671 if (record_is_replaying (minus_one_ptid))
2672 {
2673 struct thread_info *tp = inferior_thread ();
2674
2675 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2676 }
2677
2678 return this->beneath ()->stopped_by_sw_breakpoint ();
2679 }
2680
2681 /* The supports_stopped_by_sw_breakpoint method of target
2682 record-btrace. */
2683
2684 bool
2685 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2686 {
2687 if (record_is_replaying (minus_one_ptid))
2688 return true;
2689
2690 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2691 }
2692
2693 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2694
2695 bool
2696 record_btrace_target::stopped_by_hw_breakpoint ()
2697 {
2698 if (record_is_replaying (minus_one_ptid))
2699 {
2700 struct thread_info *tp = inferior_thread ();
2701
2702 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2703 }
2704
2705 return this->beneath ()->stopped_by_hw_breakpoint ();
2706 }
2707
2708 /* The supports_stopped_by_hw_breakpoint method of target
2709 record-btrace. */
2710
2711 bool
2712 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2713 {
2714 if (record_is_replaying (minus_one_ptid))
2715 return true;
2716
2717 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2718 }
2719
2720 /* The update_thread_list method of target record-btrace. */
2721
2722 void
2723 record_btrace_target::update_thread_list ()
2724 {
2725 /* We don't add or remove threads during replay. */
2726 if (record_is_replaying (minus_one_ptid))
2727 return;
2728
2729 /* Forward the request. */
2730 this->beneath ()->update_thread_list ();
2731 }
2732
2733 /* The thread_alive method of target record-btrace. */
2734
2735 bool
2736 record_btrace_target::thread_alive (ptid_t ptid)
2737 {
2738 /* We don't add or remove threads during replay. */
2739 if (record_is_replaying (minus_one_ptid))
2740 return true;
2741
2742 /* Forward the request. */
2743 return this->beneath ()->thread_alive (ptid);
2744 }
2745
2746 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2747 is stopped. */
2748
2749 static void
2750 record_btrace_set_replay (struct thread_info *tp,
2751 const struct btrace_insn_iterator *it)
2752 {
2753 struct btrace_thread_info *btinfo;
2754
2755 btinfo = &tp->btrace;
2756
2757 if (it == NULL)
2758 record_btrace_stop_replaying (tp);
2759 else
2760 {
2761 if (btinfo->replay == NULL)
2762 record_btrace_start_replaying (tp);
2763 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2764 return;
2765
2766 *btinfo->replay = *it;
2767 registers_changed_thread (tp);
2768 }
2769
2770 /* Start anew from the new replay position. */
2771 record_btrace_clear_histories (btinfo);
2772
2773 inferior_thread ()->suspend.stop_pc
2774 = regcache_read_pc (get_current_regcache ());
2775 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2776 }
2777
2778 /* The goto_record_begin method of target record-btrace. */
2779
2780 void
2781 record_btrace_target::goto_record_begin ()
2782 {
2783 struct thread_info *tp;
2784 struct btrace_insn_iterator begin;
2785
2786 tp = require_btrace_thread ();
2787
2788 btrace_insn_begin (&begin, &tp->btrace);
2789
2790 /* Skip gaps at the beginning of the trace. */
2791 while (btrace_insn_get (&begin) == NULL)
2792 {
2793 unsigned int steps;
2794
2795 steps = btrace_insn_next (&begin, 1);
2796 if (steps == 0)
2797 error (_("No trace."));
2798 }
2799
2800 record_btrace_set_replay (tp, &begin);
2801 }
2802
2803 /* The goto_record_end method of target record-btrace. */
2804
2805 void
2806 record_btrace_target::goto_record_end ()
2807 {
2808 struct thread_info *tp;
2809
2810 tp = require_btrace_thread ();
2811
2812 record_btrace_set_replay (tp, NULL);
2813 }
2814
2815 /* The goto_record method of target record-btrace. */
2816
2817 void
2818 record_btrace_target::goto_record (ULONGEST insn)
2819 {
2820 struct thread_info *tp;
2821 struct btrace_insn_iterator it;
2822 unsigned int number;
2823 int found;
2824
2825 number = insn;
2826
2827 /* Check for wrap-arounds. */
2828 if (number != insn)
2829 error (_("Instruction number out of range."));
2830
2831 tp = require_btrace_thread ();
2832
2833 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2834
2835 /* Check if the instruction could not be found or is a gap. */
2836 if (found == 0 || btrace_insn_get (&it) == NULL)
2837 error (_("No such instruction."));
2838
2839 record_btrace_set_replay (tp, &it);
2840 }
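
/* From the CLI, the three methods above are reached via "record goto
   begin", "record goto end", and "record goto <N>", respectively.  */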
2841
2842 /* The record_stop_replaying method of target record-btrace. */
2843
2844 void
2845 record_btrace_target::record_stop_replaying ()
2846 {
2847 for (thread_info *tp : all_non_exited_threads ())
2848 record_btrace_stop_replaying (tp);
2849 }
2850
2851 /* The execution_direction target method. */
2852
2853 enum exec_direction_kind
2854 record_btrace_target::execution_direction ()
2855 {
2856 return record_btrace_resume_exec_dir;
2857 }
2858
2859 /* The prepare_to_generate_core target method. */
2860
2861 void
2862 record_btrace_target::prepare_to_generate_core ()
2863 {
2864 record_btrace_generating_corefile = 1;
2865 }
2866
2867 /* The done_generating_core target method. */
2868
2869 void
2870 record_btrace_target::done_generating_core ()
2871 {
2872 record_btrace_generating_corefile = 0;
2873 }
2874
2875 /* Start recording in BTS format. */
2876
2877 static void
2878 cmd_record_btrace_bts_start (const char *args, int from_tty)
2879 {
2880 if (args != NULL && *args != 0)
2881 error (_("Invalid argument."));
2882
2883 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2884
2885 try
2886 {
2887 execute_command ("target record-btrace", from_tty);
2888 }
2889 catch (const gdb_exception &exception)
2890 {
2891 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2892 throw;
2893 }
2894 }
2895
2896 /* Start recording in Intel Processor Trace format. */
2897
2898 static void
2899 cmd_record_btrace_pt_start (const char *args, int from_tty)
2900 {
2901 if (args != NULL && *args != 0)
2902 error (_("Invalid argument."));
2903
2904 record_btrace_conf.format = BTRACE_FORMAT_PT;
2905
2906 try
2907 {
2908 execute_command ("target record-btrace", from_tty);
2909 }
2910 catch (const gdb_exception &exception)
2911 {
2912 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2913 throw;
2914 }
2915 }
2916
2917 /* Alias for "target record"; tries PT format first, falling back to BTS. */
2918
2919 static void
2920 cmd_record_btrace_start (const char *args, int from_tty)
2921 {
2922 if (args != NULL && *args != 0)
2923 error (_("Invalid argument."));
2924
2925 record_btrace_conf.format = BTRACE_FORMAT_PT;
2926
2927 try
2928 {
2929 execute_command ("target record-btrace", from_tty);
2930 }
2931 catch (const gdb_exception &exception)
2932 {
2933 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2934
2935 try
2936 {
2937 execute_command ("target record-btrace", from_tty);
2938 }
2939 catch (const gdb_exception &ex)
2940 {
2941 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2942 throw;
2943 }
2944 }
2945 }
2946
2947 /* The "set record btrace" command. */
2948
2949 static void
2950 cmd_set_record_btrace (const char *args, int from_tty)
2951 {
2952 printf_unfiltered (_("\"set record btrace\" must be followed "
2953 "by an appropriate subcommand.\n"));
2954 help_list (set_record_btrace_cmdlist, "set record btrace ",
2955 all_commands, gdb_stdout);
2956 }
2957
2958 /* The "show record btrace" command. */
2959
2960 static void
2961 cmd_show_record_btrace (const char *args, int from_tty)
2962 {
2963 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2964 }
2965
2966 /* The "show record btrace replay-memory-access" command. */
2967
2968 static void
2969 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2970 struct cmd_list_element *c, const char *value)
2971 {
2972 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2973 replay_memory_access);
2974 }
2975
2976 /* The "set record btrace cpu none" command. */
2977
2978 static void
2979 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2980 {
2981 if (args != nullptr && *args != 0)
2982 error (_("Trailing junk: '%s'."), args);
2983
2984 record_btrace_cpu_state = CS_NONE;
2985 }
2986
2987 /* The "set record btrace cpu auto" command. */
2988
2989 static void
2990 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2991 {
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2994
2995 record_btrace_cpu_state = CS_AUTO;
2996 }
2997
2998 /* The "set record btrace cpu" command. */
2999
3000 static void
3001 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3002 {
3003 if (args == nullptr)
3004 args = "";
3005
3006 /* We use a hard-coded vendor string for now. */
3007 unsigned int family, model, stepping;
3008 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3009 &model, &l1, &stepping, &l2);
3010 if (matches == 3)
3011 {
3012 if (strlen (args) != l2)
3013 error (_("Trailing junk: '%s'."), args + l2);
3014 }
3015 else if (matches == 2)
3016 {
3017 if (strlen (args) != l1)
3018 error (_("Trailing junk: '%s'."), args + l1);
3019
3020 stepping = 0;
3021 }
3022 else
3023 error (_("Bad format. See \"help set record btrace cpu\"."));
3024
3025 if (USHRT_MAX < family)
3026 error (_("Cpu family too big."));
3027
3028 if (UCHAR_MAX < model)
3029 error (_("Cpu model too big."));
3030
3031 if (UCHAR_MAX < stepping)
3032 error (_("Cpu stepping too big."));
3033
3034 record_btrace_cpu.vendor = CV_INTEL;
3035 record_btrace_cpu.family = family;
3036 record_btrace_cpu.model = model;
3037 record_btrace_cpu.stepping = stepping;
3038
3039 record_btrace_cpu_state = CS_CPU;
3040 }
3041
3042 /* The "show record btrace cpu" command. */
3043
3044 static void
3045 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3046 {
3047 if (args != nullptr && *args != 0)
3048 error (_("Trailing junk: '%s'."), args);
3049
3050 switch (record_btrace_cpu_state)
3051 {
3052 case CS_AUTO:
3053 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3054 return;
3055
3056 case CS_NONE:
3057 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3058 return;
3059
3060 case CS_CPU:
3061 switch (record_btrace_cpu.vendor)
3062 {
3063 case CV_INTEL:
3064 if (record_btrace_cpu.stepping == 0)
3065 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3066 record_btrace_cpu.family,
3067 record_btrace_cpu.model);
3068 else
3069 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3070 record_btrace_cpu.family,
3071 record_btrace_cpu.model,
3072 record_btrace_cpu.stepping);
3073 return;
3074 }
3075 }
3076
3077 error (_("Internal error: bad cpu state."));
3078 }
3079
3080 /* The "s record btrace bts" command. */
3081
3082 static void
3083 cmd_set_record_btrace_bts (const char *args, int from_tty)
3084 {
3085 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3086 "by an appropriate subcommand.\n"));
3087 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3088 all_commands, gdb_stdout);
3089 }
3090
3091 /* The "show record btrace bts" command. */
3092
3093 static void
3094 cmd_show_record_btrace_bts (const char *args, int from_tty)
3095 {
3096 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3097 }
3098
3099 /* The "set record btrace pt" command. */
3100
3101 static void
3102 cmd_set_record_btrace_pt (const char *args, int from_tty)
3103 {
3104 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3105 "by an appropriate subcommand.\n"));
3106 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3107 all_commands, gdb_stdout);
3108 }
3109
3110 /* The "show record btrace pt" command. */
3111
3112 static void
3113 cmd_show_record_btrace_pt (const char *args, int from_tty)
3114 {
3115 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3116 }
3117
3118 /* The "record bts buffer-size" show value function. */
3119
3120 static void
3121 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3122 struct cmd_list_element *c,
3123 const char *value)
3124 {
3125 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3126 value);
3127 }
3128
3129 /* The "record pt buffer-size" show value function. */
3130
3131 static void
3132 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3133 struct cmd_list_element *c,
3134 const char *value)
3135 {
3136 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3137 value);
3138 }
3139
3140 /* Initialize btrace commands. */
3141
3142 void
3143 _initialize_record_btrace (void)
3144 {
3145 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3146 _("Start branch trace recording."), &record_btrace_cmdlist,
3147 "record btrace ", 0, &record_cmdlist);
3148 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3149
3150 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3151 _("\
3152 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3153 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3154 This format may not be available on all processors."),
3155 &record_btrace_cmdlist);
3156 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3157
3158 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3159 _("\
3160 Start branch trace recording in Intel Processor Trace format.\n\n\
3161 This format may not be available on all processors."),
3162 &record_btrace_cmdlist);
3163 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3164
3165 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3166 _("Set record options."), &set_record_btrace_cmdlist,
3167 "set record btrace ", 0, &set_record_cmdlist);
3168
3169 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3170 _("Show record options."), &show_record_btrace_cmdlist,
3171 "show record btrace ", 0, &show_record_cmdlist);
3172
3173 add_setshow_enum_cmd ("replay-memory-access", no_class,
3174 replay_memory_access_types, &replay_memory_access, _("\
3175 Set what memory accesses are allowed during replay."), _("\
3176 Show what memory accesses are allowed during replay."),
3177 _("Default is READ-ONLY.\n\n\
3178 The btrace record target does not trace data.\n\
3179 The memory therefore corresponds to the live target and not \
3180 to the current replay position.\n\n\
3181 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3182 When READ-WRITE, allow accesses to read-only and read-write memory during \
3183 replay."),
3184 NULL, cmd_show_replay_memory_access,
3185 &set_record_btrace_cmdlist,
3186 &show_record_btrace_cmdlist);
3187
3188 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3189 _("\
3190 Set the cpu to be used for trace decode.\n\n\
3191 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3192 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3193 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3194 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3195 When GDB does not support that cpu, this option can be used to enable\n\
3196 workarounds for a similar cpu that GDB supports.\n\n\
3197 When set to \"none\", errata workarounds are disabled."),
3198 &set_record_btrace_cpu_cmdlist,
3199 "set record btrace cpu ", 1,
3200 &set_record_btrace_cmdlist);
3201
3202 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3203 Automatically determine the cpu to be used for trace decode."),
3204 &set_record_btrace_cpu_cmdlist);
3205
3206 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3207 Do not enable errata workarounds for trace decode."),
3208 &set_record_btrace_cpu_cmdlist);
3209
3210 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3211 Show the cpu to be used for trace decode."),
3212 &show_record_btrace_cmdlist);
3213
3214 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3215 _("Set record btrace bts options."),
3216 &set_record_btrace_bts_cmdlist,
3217 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3218
3219 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3220 _("Show record btrace bts options."),
3221 &show_record_btrace_bts_cmdlist,
3222 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3223
3224 add_setshow_uinteger_cmd ("buffer-size", no_class,
3225 &record_btrace_conf.bts.size,
3226 _("Set the record/replay bts buffer size."),
3227 _("Show the record/replay bts buffer size."), _("\
3228 When starting recording, request a trace buffer of this size. \
3229 The actual buffer size may differ from the requested size. \
3230 Use \"info record\" to see the actual buffer size.\n\n\
3231 Bigger buffers allow longer recording but also take more time to process \
3232 the recorded execution trace.\n\n\
3233 The trace buffer size may not be changed while recording."), NULL,
3234 show_record_bts_buffer_size_value,
3235 &set_record_btrace_bts_cmdlist,
3236 &show_record_btrace_bts_cmdlist);
3237
3238 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3239 _("Set record btrace pt options."),
3240 &set_record_btrace_pt_cmdlist,
3241 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3242
3243 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3244 _("Show record btrace pt options."),
3245 &show_record_btrace_pt_cmdlist,
3246 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3247
3248 add_setshow_uinteger_cmd ("buffer-size", no_class,
3249 &record_btrace_conf.pt.size,
3250 _("Set the record/replay pt buffer size."),
3251 _("Show the record/replay pt buffer size."), _("\
3252 Bigger buffers allow longer recording but also take more time to process \
3253 the recorded execution.\n\
3254 The actual buffer size may differ from the requested size. Use \"info record\" \
3255 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3256 &set_record_btrace_pt_cmdlist,
3257 &show_record_btrace_pt_cmdlist);
3258
3259 add_target (record_btrace_target_info, record_btrace_target_open);
3260
3261 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3262 xcalloc, xfree);
3263
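/* Default buffer sizes; both can be changed with the buffer-size
   commands registered above (the BTS size may not be changed while
   recording).  */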
3264 record_btrace_conf.bts.size = 64 * 1024;
3265 record_btrace_conf.pt.size = 16 * 1024;
3266 }