/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2019 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "gdbsupport/vec.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
                         struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
                         enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
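
/* For example, DEBUG ("resume %s", "step") prints
   "[record-btrace] resume step" to gdb_stdlog once debugging has been
   enabled with "set debug record 1".  */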


/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
                                     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
                                  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}
/* Adjust the size and return a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
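
/* For example, a *SIZE of 2097152 (2 << 20) is adjusted to 2 with suffix
   "MB"; a *SIZE that is not an exact multiple of 1 kB, such as 1000, is
   left unchanged and gets the empty suffix.  */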

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
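
/* For a BTS recording with a 64 kB ring buffer, for instance, this prints
   something along the lines of

     Recording format: Branch Trace Store.
     Buffer size: 64kB.  */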

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp),
                     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}
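
/* A gap with error code -1, say, thus appears in the trace listing as
   "[decode error (-1): <message>]", with <message> supplied by
   btrace_decode_error; Intel PT notifications (ERRCODE > 0) print just
   "[<message>]".  */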

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end <= line)
    /* END is exclusive, so it must lie one past the added line.  */
    range.end = line + 1;

  return range;
}
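
/* For example, starting from an empty range and adding lines 7, 5, and 9
   in turn yields the half-open range [5, 10), i.e. BEGIN 5 and END 10.  */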

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the output emitters for the last
   source line and the instructions corresponding to that source line.
   When printing a new source line, we close the emitters that are still
   open and open new ones for the new source line.  If the source line
   range in LINES is not empty, this function will leave the emitters for
   the last printed source line open so instructions can be added to
   them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
                    gdb::optional<ui_out_emit_list> *asm_list,
                    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
                                      &asm_list, flags);
                  last_lines = lines;
                }
              else if (!src_and_asm_tuple.has_value ())
                {
                  gdb_assert (!asm_list.has_value ());

                  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

                  /* No source information.  */
                  asm_list.emplace (uiout, "line_asm_insn");
                }

              gdb_assert (src_and_asm_tuple.has_value ());
              gdb_assert (asm_list.has_value ());
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (&dinsn, flags);
        }
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
                                          gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
                                         gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
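
/* For example, FROM 100 with SIZE -5 requests the inclusive range
   [96; 100], and FROM 100 with SIZE 5 requests [100; 104], clamped to
   ULONGEST_MAX on wrap-around.  */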

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)),
                       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
                             function_name_style.style ());
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
                             function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??",
                             function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
                                          record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
                                         record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}
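
/* The begin/end arithmetic mirrors insn_history_from above: FROM 100 with
   SIZE -5 requests the inclusive call range [96; 100], while a positive
   SIZE expands forward from FROM.  */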

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  for (thread_info *tp : all_non_exited_threads (ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
                                    const char *annex, gdb_byte *readbuf,
                                    const gdb_byte *writebuf, ULONGEST offset,
                                    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (this, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_section_flags (section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
                                         offset, len, xfered_len);
}
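
/* In effect, while replaying, reads from read-only sections (e.g. code)
   succeed, whereas writes and reads of writable memory report
   TARGET_XFER_UNAVAILABLE unless the user switches to
   "set record btrace replay-memory-access read-write".  */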

/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt,
                                         enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache->ptid ());
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by FRAME.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
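
/* If BFUN's up link points to the segment it called from, the unwound PC
   is the return address: the caller's last recorded instruction (the
   call) plus its length.  If the up link points to the segment BFUN
   returned to (BFUN_UP_LINKS_TO_RET), the unwound PC is that segment's
   first recorded instruction.  */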

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the
   contents of the stack frames.  Any unwinding would return erroneous
   results as the stack contents no longer match the changed PC value
   restored from history.  Therefore this unwinder reports any possibly
   unwound registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the get_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}

/* Implement the get_tailcall_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str (), flag,
         btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  int executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  return id;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2076
2077 /* Stop replaying a thread. */
2078
2079 static void
2080 record_btrace_stop_replaying (struct thread_info *tp)
2081 {
2082 struct btrace_thread_info *btinfo;
2083
2084 btinfo = &tp->btrace;
2085
2086 xfree (btinfo->replay);
2087 btinfo->replay = NULL;
2088
2089 /* Make sure we're not leaving any stale registers. */
2090 registers_changed_thread (tp);
2091 }
2092
2093 /* Stop replaying TP if it is at the end of its execution history. */
2094
2095 static void
2096 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2097 {
2098 struct btrace_insn_iterator *replay, end;
2099 struct btrace_thread_info *btinfo;
2100
2101 btinfo = &tp->btrace;
2102 replay = btinfo->replay;
2103
2104 if (replay == NULL)
2105 return;
2106
2107 btrace_insn_end (&end, btinfo);
2108
2109 if (btrace_insn_cmp (replay, &end) == 0)
2110 record_btrace_stop_replaying (tp);
2111 }
2112
2113 /* The resume method of target record-btrace. */
2114
2115 void
2116 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2117 {
2118 enum btrace_thread_flag flag, cflag;
2119
2120 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2121 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2122 step ? "step" : "cont");
2123
2124 /* Store the execution direction of the last resume.
2125
2126 If there is more than one resume call, we have to rely on infrun
2127 to not change the execution direction in-between. */
2128 record_btrace_resume_exec_dir = ::execution_direction;
2129
2130 /* As long as we're not replaying, just forward the request.
2131
2132 For non-stop targets this means that no thread is replaying. In order to
2133 make progress, we may need to explicitly move replaying threads to the end
2134 of their execution history. */
2135 if ((::execution_direction != EXEC_REVERSE)
2136 && !record_is_replaying (minus_one_ptid))
2137 {
2138 this->beneath ()->resume (ptid, step, signal);
2139 return;
2140 }
2141
2142 /* Compute the btrace thread flag for the requested move. */
2143 if (::execution_direction == EXEC_REVERSE)
2144 {
2145 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2146 cflag = BTHR_RCONT;
2147 }
2148 else
2149 {
2150 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2151 cflag = BTHR_CONT;
2152 }
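/* I.e. the requested move maps to: forward continue -> BTHR_CONT,
forward step -> BTHR_STEP, reverse continue -> BTHR_RCONT, reverse
step -> BTHR_RSTEP. CFLAG is the matching continue variant, used
below for threads other than INFERIOR_PTID on all-stop targets. */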
2153
2154 /* We just indicate the resume intent here. The actual stepping happens in
2155 record_btrace_wait below.
2156
2157 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2158 if (!target_is_non_stop_p ())
2159 {
2160 gdb_assert (inferior_ptid.matches (ptid));
2161
2162 for (thread_info *tp : all_non_exited_threads (ptid))
2163 {
2164 if (tp->ptid.matches (inferior_ptid))
2165 record_btrace_resume_thread (tp, flag);
2166 else
2167 record_btrace_resume_thread (tp, cflag);
2168 }
2169 }
2170 else
2171 {
2172 for (thread_info *tp : all_non_exited_threads (ptid))
2173 record_btrace_resume_thread (tp, flag);
2174 }
2175
2176 /* Async support. */
2177 if (target_can_async_p ())
2178 {
2179 target_async (1);
2180 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2181 }
2182 }
2183
2184 /* The commit_resume method of target record-btrace. */
2185
2186 void
2187 record_btrace_target::commit_resume ()
2188 {
2189 if ((::execution_direction != EXEC_REVERSE)
2190 && !record_is_replaying (minus_one_ptid))
2191 beneath ()->commit_resume ();
2192 }
2193
2194 /* Cancel resuming TP. */
2195
2196 static void
2197 record_btrace_cancel_resume (struct thread_info *tp)
2198 {
2199 enum btrace_thread_flag flags;
2200
2201 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2202 if (flags == 0)
2203 return;
2204
2205 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2206 print_thread_id (tp),
2207 target_pid_to_str (tp->ptid).c_str (), flags,
2208 btrace_thread_flag_to_str (flags));
2209
2210 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2211 record_btrace_stop_replaying_at_end (tp);
2212 }
2213
2214 /* Return a target_waitstatus indicating that we ran out of history. */
2215
2216 static struct target_waitstatus
2217 btrace_step_no_history (void)
2218 {
2219 struct target_waitstatus status;
2220
2221 status.kind = TARGET_WAITKIND_NO_HISTORY;
2222
2223 return status;
2224 }
2225
2226 /* Return a target_waitstatus indicating that a step finished. */
2227
2228 static struct target_waitstatus
2229 btrace_step_stopped (void)
2230 {
2231 struct target_waitstatus status;
2232
2233 status.kind = TARGET_WAITKIND_STOPPED;
2234 status.value.sig = GDB_SIGNAL_TRAP;
2235
2236 return status;
2237 }
2238
2239 /* Return a target_waitstatus indicating that a thread was stopped as
2240 requested. */
2241
2242 static struct target_waitstatus
2243 btrace_step_stopped_on_request (void)
2244 {
2245 struct target_waitstatus status;
2246
2247 status.kind = TARGET_WAITKIND_STOPPED;
2248 status.value.sig = GDB_SIGNAL_0;
2249
2250 return status;
2251 }
2252
2253 /* Return a target_waitstatus indicating a spurious stop. */
2254
2255 static struct target_waitstatus
2256 btrace_step_spurious (void)
2257 {
2258 struct target_waitstatus status;
2259
2260 status.kind = TARGET_WAITKIND_SPURIOUS;
2261
2262 return status;
2263 }
2264
2265 /* Return a target_waitstatus indicating that the thread was not resumed. */
2266
2267 static struct target_waitstatus
2268 btrace_step_no_resumed (void)
2269 {
2270 struct target_waitstatus status;
2271
2272 status.kind = TARGET_WAITKIND_NO_RESUMED;
2273
2274 return status;
2275 }
2276
2277 /* Return a target_waitstatus indicating that we should wait again. */
2278
2279 static struct target_waitstatus
2280 btrace_step_again (void)
2281 {
2282 struct target_waitstatus status;
2283
2284 status.kind = TARGET_WAITKIND_IGNORE;
2285
2286 return status;
2287 }
2288
2289 /* Clear the record histories. */
2290
2291 static void
2292 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2293 {
2294 xfree (btinfo->insn_history);
2295 xfree (btinfo->call_history);
2296
2297 btinfo->insn_history = NULL;
2298 btinfo->call_history = NULL;
2299 }
2300
2301 /* Check whether TP's current replay position is at a breakpoint. */
2302
2303 static int
2304 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2305 {
2306 struct btrace_insn_iterator *replay;
2307 struct btrace_thread_info *btinfo;
2308 const struct btrace_insn *insn;
2309
2310 btinfo = &tp->btrace;
2311 replay = btinfo->replay;
2312
2313 if (replay == NULL)
2314 return 0;
2315
2316 insn = btrace_insn_get (replay);
2317 if (insn == NULL)
2318 return 0;
2319
2320 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2321 &btinfo->stop_reason);
2322 }
2323
2324 /* Step one instruction in forward direction. */
2325
2326 static struct target_waitstatus
2327 record_btrace_single_step_forward (struct thread_info *tp)
2328 {
2329 struct btrace_insn_iterator *replay, end, start;
2330 struct btrace_thread_info *btinfo;
2331
2332 btinfo = &tp->btrace;
2333 replay = btinfo->replay;
2334
2335 /* We're done if we're not replaying. */
2336 if (replay == NULL)
2337 return btrace_step_no_history ();
2338
2339 /* Check if we're stepping a breakpoint. */
2340 if (record_btrace_replay_at_breakpoint (tp))
2341 return btrace_step_stopped ();
2342
2343 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2344 jump back to the instruction at which we started. */
2345 start = *replay;
2346 do
2347 {
2348 unsigned int steps;
2349
2350 /* We will bail out here if we continue stepping after reaching the end
2351 of the execution history. */
2352 steps = btrace_insn_next (replay, 1);
2353 if (steps == 0)
2354 {
2355 *replay = start;
2356 return btrace_step_no_history ();
2357 }
2358 }
2359 while (btrace_insn_get (replay) == NULL);
2360
2361 /* Determine the end of the instruction trace. */
2362 btrace_insn_end (&end, btinfo);
2363
2364 /* The execution trace contains (and ends with) the current instruction.
2365 This instruction has not been executed, yet, so the trace really ends
2366 one instruction earlier. */
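/* For illustration (instruction numbers hypothetical): if the trace
records insns 1..N, insn N is the current, not-yet-executed
instruction; once REPLAY reaches N there is nothing further to
replay forward. */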
2367 if (btrace_insn_cmp (replay, &end) == 0)
2368 return btrace_step_no_history ();
2369
2370 return btrace_step_spurious ();
2371 }
2372
2373 /* Step one instruction in backward direction. */
2374
2375 static struct target_waitstatus
2376 record_btrace_single_step_backward (struct thread_info *tp)
2377 {
2378 struct btrace_insn_iterator *replay, start;
2379 struct btrace_thread_info *btinfo;
2380
2381 btinfo = &tp->btrace;
2382 replay = btinfo->replay;
2383
2384 /* Start replaying if we're not already doing so. */
2385 if (replay == NULL)
2386 replay = record_btrace_start_replaying (tp);
2387
2388 /* If we can't step any further, we reached the end of the history.
2389 Skip gaps during replay. If we end up at a gap (at the beginning of
2390 the trace), jump back to the instruction at which we started. */
2391 start = *replay;
2392 do
2393 {
2394 unsigned int steps;
2395
2396 steps = btrace_insn_prev (replay, 1);
2397 if (steps == 0)
2398 {
2399 *replay = start;
2400 return btrace_step_no_history ();
2401 }
2402 }
2403 while (btrace_insn_get (replay) == NULL);
2404
2405 /* Check if we're stepping a breakpoint.
2406
2407 For reverse-stepping, this check is after the step. There is logic in
2408 infrun.c that handles reverse-stepping separately. See, for example,
2409 proceed and adjust_pc_after_break.
2410
2411 This code assumes that for reverse-stepping, PC points to the last
2412 de-executed instruction, whereas for forward-stepping PC points to the
2413 next to-be-executed instruction. */
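/* An illustrative sketch (instruction numbers hypothetical):
reverse-stepping from insn 7 de-executes insn 6 and leaves PC at
insn 6, so a breakpoint at insn 6 only becomes detectable after the
move; stepping forward from insn 6 would instead leave PC at insn 7,
the instruction about to be executed. */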
2414 if (record_btrace_replay_at_breakpoint (tp))
2415 return btrace_step_stopped ();
2416
2417 return btrace_step_spurious ();
2418 }
2419
2420 /* Step a single thread. */
2421
2422 static struct target_waitstatus
2423 record_btrace_step_thread (struct thread_info *tp)
2424 {
2425 struct btrace_thread_info *btinfo;
2426 struct target_waitstatus status;
2427 enum btrace_thread_flag flags;
2428
2429 btinfo = &tp->btrace;
2430
2431 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2432 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2433
2434 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2435 target_pid_to_str (tp->ptid).c_str (), flags,
2436 btrace_thread_flag_to_str (flags));
2437
2438 /* We can't step without an execution history. */
2439 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2440 return btrace_step_no_history ();
2441
2442 switch (flags)
2443 {
2444 default:
2445 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2446
2447 case BTHR_STOP:
2448 return btrace_step_stopped_on_request ();
2449
2450 case BTHR_STEP:
2451 status = record_btrace_single_step_forward (tp);
2452 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2453 break;
2454
2455 return btrace_step_stopped ();
2456
2457 case BTHR_RSTEP:
2458 status = record_btrace_single_step_backward (tp);
2459 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2460 break;
2461
2462 return btrace_step_stopped ();
2463
2464 case BTHR_CONT:
2465 status = record_btrace_single_step_forward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2467 break;
2468
2469 btinfo->flags |= flags;
2470 return btrace_step_again ();
2471
2472 case BTHR_RCONT:
2473 status = record_btrace_single_step_backward (tp);
2474 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2475 break;
2476
2477 btinfo->flags |= flags;
2478 return btrace_step_again ();
2479 }
2480
2481 /* We keep threads moving at the end of their execution history. The wait
2482 method will stop the thread for which the event is reported. */
2483 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2484 btinfo->flags |= flags;
2485
2486 return status;
2487 }
2488
2489 /* Announce further events if necessary. */
2490
2491 static void
2492 record_btrace_maybe_mark_async_event
2493 (const std::vector<thread_info *> &moving,
2494 const std::vector<thread_info *> &no_history)
2495 {
2496 bool more_moving = !moving.empty ();
2497 bool more_no_history = !no_history.empty ();
2498
2499 if (!more_moving && !more_no_history)
2500 return;
2501
2502 if (more_moving)
2503 DEBUG ("movers pending");
2504
2505 if (more_no_history)
2506 DEBUG ("no-history pending");
2507
2508 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2509 }
2510
2511 /* The wait method of target record-btrace. */
2512
2513 ptid_t
2514 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2515 int options)
2516 {
2517 std::vector<thread_info *> moving;
2518 std::vector<thread_info *> no_history;
2519
2520 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2521
2522 /* As long as we're not replaying, just forward the request. */
2523 if ((::execution_direction != EXEC_REVERSE)
2524 && !record_is_replaying (minus_one_ptid))
2525 {
2526 return this->beneath ()->wait (ptid, status, options);
2527 }
2528
2529 /* Keep a work list of moving threads. */
2530 for (thread_info *tp : all_non_exited_threads (ptid))
2531 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2532 moving.push_back (tp);
2533
2534 if (moving.empty ())
2535 {
2536 *status = btrace_step_no_resumed ();
2537
2538 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2539 target_waitstatus_to_string (status).c_str ());
2540
2541 return null_ptid;
2542 }
2543
2544 /* Step moving threads one by one, one step each, until either one thread
2545 reports an event or we run out of threads to step.
2546
2547 When stepping more than one thread, chances are that some threads reach
2548 the end of their execution history earlier than others. If we reported
2549 this immediately, all-stop on top of non-stop would stop all threads and
2550 resume the same threads next time. And we would report the same thread
2551 having reached the end of its execution history again.
2552
2553 In the worst case, this would starve the other threads. But even if other
2554 threads would be allowed to make progress, this would result in far too
2555 many intermediate stops.
2556
2557 We therefore delay the reporting of "no execution history" until we have
2558 nothing else to report. By this time, all threads should have moved to
2559 either the beginning or the end of their execution history. There will
2560 be a single user-visible stop. */
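/* In outline: single-step each moving thread in turn.
TARGET_WAITKIND_IGNORE keeps a thread in MOVING,
TARGET_WAITKIND_NO_HISTORY migrates it to NO_HISTORY, and any other
event terminates the loop with that thread as EVENTING. */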
2561 struct thread_info *eventing = NULL;
2562 while ((eventing == NULL) && !moving.empty ())
2563 {
2564 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2565 {
2566 thread_info *tp = moving[ix];
2567
2568 *status = record_btrace_step_thread (tp);
2569
2570 switch (status->kind)
2571 {
2572 case TARGET_WAITKIND_IGNORE:
2573 ix++;
2574 break;
2575
2576 case TARGET_WAITKIND_NO_HISTORY:
2577 no_history.push_back (ordered_remove (moving, ix));
2578 break;
2579
2580 default:
2581 eventing = unordered_remove (moving, ix);
2582 break;
2583 }
2584 }
2585 }
2586
2587 if (eventing == NULL)
2588 {
2589 /* We started with at least one moving thread. This thread must have
2590 either stopped or reached the end of its execution history.
2591
2592 In the former case, EVENTING must not be NULL.
2593 In the latter case, NO_HISTORY must not be empty. */
2594 gdb_assert (!no_history.empty ());
2595
2596 /* We kept threads moving at the end of their execution history. Stop
2597 EVENTING now that we are going to report its stop. */
2598 eventing = unordered_remove (no_history, 0);
2599 eventing->btrace.flags &= ~BTHR_MOVE;
2600
2601 *status = btrace_step_no_history ();
2602 }
2603
2604 gdb_assert (eventing != NULL);
2605
2606 /* We kept threads replaying at the end of their execution history. Stop
2607 replaying EVENTING now that we are going to report its stop. */
2608 record_btrace_stop_replaying_at_end (eventing);
2609
2610 /* Stop all other threads. */
2611 if (!target_is_non_stop_p ())
2612 {
2613 for (thread_info *tp : all_non_exited_threads ())
2614 record_btrace_cancel_resume (tp);
2615 }
2616
2617 /* In async mode, we need to announce further events. */
2618 if (target_is_async_p ())
2619 record_btrace_maybe_mark_async_event (moving, no_history);
2620
2621 /* Start record histories anew from the current position. */
2622 record_btrace_clear_histories (&eventing->btrace);
2623
2624 /* We moved the replay position but did not update registers. */
2625 registers_changed_thread (eventing);
2626
2627 DEBUG ("wait ended by thread %s (%s): %s",
2628 print_thread_id (eventing),
2629 target_pid_to_str (eventing->ptid).c_str (),
2630 target_waitstatus_to_string (status).c_str ());
2631
2632 return eventing->ptid;
2633 }
2634
2635 /* The stop method of target record-btrace. */
2636
2637 void
2638 record_btrace_target::stop (ptid_t ptid)
2639 {
2640 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2641
2642 /* As long as we're not replaying, just forward the request. */
2643 if ((::execution_direction != EXEC_REVERSE)
2644 && !record_is_replaying (minus_one_ptid))
2645 {
2646 this->beneath ()->stop (ptid);
2647 }
2648 else
2649 {
2650 for (thread_info *tp : all_non_exited_threads (ptid))
2651 {
2652 tp->btrace.flags &= ~BTHR_MOVE;
2653 tp->btrace.flags |= BTHR_STOP;
2654 }
2655 }
2656 }
2657
2658 /* The can_execute_reverse method of target record-btrace. */
2659
2660 bool
2661 record_btrace_target::can_execute_reverse ()
2662 {
2663 return true;
2664 }
2665
2666 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2667
2668 bool
2669 record_btrace_target::stopped_by_sw_breakpoint ()
2670 {
2671 if (record_is_replaying (minus_one_ptid))
2672 {
2673 struct thread_info *tp = inferior_thread ();
2674
2675 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2676 }
2677
2678 return this->beneath ()->stopped_by_sw_breakpoint ();
2679 }
2680
2681 /* The supports_stopped_by_sw_breakpoint method of target
2682 record-btrace. */
2683
2684 bool
2685 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2686 {
2687 if (record_is_replaying (minus_one_ptid))
2688 return true;
2689
2690 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2691 }
2692
2693 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2694
2695 bool
2696 record_btrace_target::stopped_by_hw_breakpoint ()
2697 {
2698 if (record_is_replaying (minus_one_ptid))
2699 {
2700 struct thread_info *tp = inferior_thread ();
2701
2702 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2703 }
2704
2705 return this->beneath ()->stopped_by_hw_breakpoint ();
2706 }
2707
2708 /* The supports_stopped_by_hw_breakpoint method of target
2709 record-btrace. */
2710
2711 bool
2712 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2713 {
2714 if (record_is_replaying (minus_one_ptid))
2715 return true;
2716
2717 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2718 }
2719
2720 /* The update_thread_list method of target record-btrace. */
2721
2722 void
2723 record_btrace_target::update_thread_list ()
2724 {
2725 /* We don't add or remove threads during replay. */
2726 if (record_is_replaying (minus_one_ptid))
2727 return;
2728
2729 /* Forward the request. */
2730 this->beneath ()->update_thread_list ();
2731 }
2732
2733 /* The thread_alive method of target record-btrace. */
2734
2735 bool
2736 record_btrace_target::thread_alive (ptid_t ptid)
2737 {
2738 /* We don't add or remove threads during replay. */
2739 if (record_is_replaying (minus_one_ptid))
2740 return true;
2741
2742 /* Forward the request. */
2743 return this->beneath ()->thread_alive (ptid);
2744 }
2745
2746 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2747 is stopped. */
2748
2749 static void
2750 record_btrace_set_replay (struct thread_info *tp,
2751 const struct btrace_insn_iterator *it)
2752 {
2753 struct btrace_thread_info *btinfo;
2754
2755 btinfo = &tp->btrace;
2756
2757 if (it == NULL)
2758 record_btrace_stop_replaying (tp);
2759 else
2760 {
2761 if (btinfo->replay == NULL)
2762 record_btrace_start_replaying (tp);
2763 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2764 return;
2765
2766 *btinfo->replay = *it;
2767 registers_changed_thread (tp);
2768 }
2769
2770 /* Start anew from the new replay position. */
2771 record_btrace_clear_histories (btinfo);
2772
2773 inferior_thread ()->suspend.stop_pc
2774 = regcache_read_pc (get_current_regcache ());
2775 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2776 }
2777
2778 /* The goto_record_begin method of target record-btrace. */
2779
2780 void
2781 record_btrace_target::goto_record_begin ()
2782 {
2783 struct thread_info *tp;
2784 struct btrace_insn_iterator begin;
2785
2786 tp = require_btrace_thread ();
2787
2788 btrace_insn_begin (&begin, &tp->btrace);
2789
2790 /* Skip gaps at the beginning of the trace. */
2791 while (btrace_insn_get (&begin) == NULL)
2792 {
2793 unsigned int steps;
2794
2795 steps = btrace_insn_next (&begin, 1);
2796 if (steps == 0)
2797 error (_("No trace."));
2798 }
2799
2800 record_btrace_set_replay (tp, &begin);
2801 }
2802
2803 /* The goto_record_end method of target record-btrace. */
2804
2805 void
2806 record_btrace_target::goto_record_end ()
2807 {
2808 struct thread_info *tp;
2809
2810 tp = require_btrace_thread ();
2811
2812 record_btrace_set_replay (tp, NULL);
2813 }
2814
2815 /* The goto_record method of target record-btrace. */
2816
2817 void
2818 record_btrace_target::goto_record (ULONGEST insn)
2819 {
2820 struct thread_info *tp;
2821 struct btrace_insn_iterator it;
2822 unsigned int number;
2823 int found;
2824
2825 number = insn;
2826
2827 /* Check for wrap-arounds. */
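/* NUMBER is unsigned int while INSN is ULONGEST, so, e.g., an INSN of
0x100000000 would truncate to 0 in the assignment above; the
comparison below catches exactly that. */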
2828 if (number != insn)
2829 error (_("Instruction number out of range."));
2830
2831 tp = require_btrace_thread ();
2832
2833 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2834
2835 /* Check if the instruction could not be found or is a gap. */
2836 if (found == 0 || btrace_insn_get (&it) == NULL)
2837 error (_("No such instruction."));
2838
2839 record_btrace_set_replay (tp, &it);
2840 }
2841
2842 /* The record_stop_replaying method of target record-btrace. */
2843
2844 void
2845 record_btrace_target::record_stop_replaying ()
2846 {
2847 for (thread_info *tp : all_non_exited_threads ())
2848 record_btrace_stop_replaying (tp);
2849 }
2850
2851 /* The execution_direction target method. */
2852
2853 enum exec_direction_kind
2854 record_btrace_target::execution_direction ()
2855 {
2856 return record_btrace_resume_exec_dir;
2857 }
2858
2859 /* The prepare_to_generate_core target method. */
2860
2861 void
2862 record_btrace_target::prepare_to_generate_core ()
2863 {
2864 record_btrace_generating_corefile = 1;
2865 }
2866
2867 /* The done_generating_core target method. */
2868
2869 void
2870 record_btrace_target::done_generating_core ()
2871 {
2872 record_btrace_generating_corefile = 0;
2873 }
2874
2875 /* Start recording in BTS format. */
2876
2877 static void
2878 cmd_record_btrace_bts_start (const char *args, int from_tty)
2879 {
2880 if (args != NULL && *args != 0)
2881 error (_("Invalid argument."));
2882
2883 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2884
2885 try
2886 {
2887 execute_command ("target record-btrace", from_tty);
2888 }
2889 catch (const gdb_exception &exception)
2890 {
2891 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2892 throw;
2893 }
2894 }
2895
2896 /* Start recording in Intel Processor Trace format. */
2897
2898 static void
2899 cmd_record_btrace_pt_start (const char *args, int from_tty)
2900 {
2901 if (args != NULL && *args != 0)
2902 error (_("Invalid argument."));
2903
2904 record_btrace_conf.format = BTRACE_FORMAT_PT;
2905
2906 try
2907 {
2908 execute_command ("target record-btrace", from_tty);
2909 }
2910 catch (const gdb_exception &exception)
2911 {
2912 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2913 throw;
2914 }
2915 }
2916
2917 /* Alias for "target record". */
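/* Recording is attempted in Intel Processor Trace format first; if
starting in that format fails, the nested try/catch below falls back
to BTS before giving up. */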
2918
2919 static void
2920 cmd_record_btrace_start (const char *args, int from_tty)
2921 {
2922 if (args != NULL && *args != 0)
2923 error (_("Invalid argument."));
2924
2925 record_btrace_conf.format = BTRACE_FORMAT_PT;
2926
2927 try
2928 {
2929 execute_command ("target record-btrace", from_tty);
2930 }
2931 catch (const gdb_exception &exception)
2932 {
2933 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2934
2935 try
2936 {
2937 execute_command ("target record-btrace", from_tty);
2938 }
2939 catch (const gdb_exception &ex)
2940 {
2941 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2942 throw;
2943 }
2944 }
2945 }
2946
2947 /* The "set record btrace" command. */
2948
2949 static void
2950 cmd_set_record_btrace (const char *args, int from_tty)
2951 {
2952 printf_unfiltered (_("\"set record btrace\" must be followed "
2953 "by an appropriate subcommand.\n"));
2954 help_list (set_record_btrace_cmdlist, "set record btrace ",
2955 all_commands, gdb_stdout);
2956 }
2957
2958 /* The "show record btrace" command. */
2959
2960 static void
2961 cmd_show_record_btrace (const char *args, int from_tty)
2962 {
2963 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2964 }
2965
2966 /* The "show record btrace replay-memory-access" command. */
2967
2968 static void
2969 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2970 struct cmd_list_element *c, const char *value)
2971 {
2972 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2973 replay_memory_access);
2974 }
2975
2976 /* The "set record btrace cpu none" command. */
2977
2978 static void
2979 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2980 {
2981 if (args != nullptr && *args != 0)
2982 error (_("Trailing junk: '%s'."), args);
2983
2984 record_btrace_cpu_state = CS_NONE;
2985 }
2986
2987 /* The "set record btrace cpu auto" command. */
2988
2989 static void
2990 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2991 {
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2994
2995 record_btrace_cpu_state = CS_AUTO;
2996 }
2997
2998 /* The "set record btrace cpu" command. */
2999
3000 static void
3001 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3002 {
3003 if (args == nullptr)
3004 args = "";
3005
3006 /* We use a hard-coded vendor string for now. */
3007 unsigned int family, model, stepping;
3008 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3009 &model, &l1, &stepping, &l2);
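/* The %n directives store the number of characters consumed so far in
L1 and L2 without counting as conversions. E.g. (values
illustrative), "intel: 6/158" yields MATCHES == 2 and
"intel: 6/158/9" yields MATCHES == 3. */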
3010 if (matches == 3)
3011 {
3012 if (strlen (args) != l2)
3013 error (_("Trailing junk: '%s'."), args + l2);
3014 }
3015 else if (matches == 2)
3016 {
3017 if (strlen (args) != l1)
3018 error (_("Trailing junk: '%s'."), args + l1);
3019
3020 stepping = 0;
3021 }
3022 else
3023 error (_("Bad format. See \"help set record btrace cpu\"."));
3024
3025 if (USHRT_MAX < family)
3026 error (_("Cpu family too big."));
3027
3028 if (UCHAR_MAX < model)
3029 error (_("Cpu model too big."));
3030
3031 if (UCHAR_MAX < stepping)
3032 error (_("Cpu stepping too big."));
3033
3034 record_btrace_cpu.vendor = CV_INTEL;
3035 record_btrace_cpu.family = family;
3036 record_btrace_cpu.model = model;
3037 record_btrace_cpu.stepping = stepping;
3038
3039 record_btrace_cpu_state = CS_CPU;
3040 }
3041
3042 /* The "show record btrace cpu" command. */
3043
3044 static void
3045 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3046 {
3047 if (args != nullptr && *args != 0)
3048 error (_("Trailing junk: '%s'."), args);
3049
3050 switch (record_btrace_cpu_state)
3051 {
3052 case CS_AUTO:
3053 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3054 return;
3055
3056 case CS_NONE:
3057 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3058 return;
3059
3060 case CS_CPU:
3061 switch (record_btrace_cpu.vendor)
3062 {
3063 case CV_INTEL:
3064 if (record_btrace_cpu.stepping == 0)
3065 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3066 record_btrace_cpu.family,
3067 record_btrace_cpu.model);
3068 else
3069 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3070 record_btrace_cpu.family,
3071 record_btrace_cpu.model,
3072 record_btrace_cpu.stepping);
3073 return;
3074 }
3075 }
3076
3077 error (_("Internal error: bad cpu state."));
3078 }
3079
3080 /* The "s record btrace bts" command. */
3081
3082 static void
3083 cmd_set_record_btrace_bts (const char *args, int from_tty)
3084 {
3085 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3086 "by an appropriate subcommand.\n"));
3087 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3088 all_commands, gdb_stdout);
3089 }
3090
3091 /* The "show record btrace bts" command. */
3092
3093 static void
3094 cmd_show_record_btrace_bts (const char *args, int from_tty)
3095 {
3096 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3097 }
3098
3099 /* The "set record btrace pt" command. */
3100
3101 static void
3102 cmd_set_record_btrace_pt (const char *args, int from_tty)
3103 {
3104 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3105 "by an appropriate subcommand.\n"));
3106 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3107 all_commands, gdb_stdout);
3108 }
3109
3110 /* The "show record btrace pt" command. */
3111
3112 static void
3113 cmd_show_record_btrace_pt (const char *args, int from_tty)
3114 {
3115 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3116 }
3117
3118 /* The "record bts buffer-size" show value function. */
3119
3120 static void
3121 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3122 struct cmd_list_element *c,
3123 const char *value)
3124 {
3125 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3126 value);
3127 }
3128
3129 /* The "record pt buffer-size" show value function. */
3130
3131 static void
3132 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3133 struct cmd_list_element *c,
3134 const char *value)
3135 {
3136 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3137 value);
3138 }
3139
3140 /* Initialize btrace commands. */
3141
3142 void
3143 _initialize_record_btrace (void)
3144 {
3145 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3146 _("Start branch trace recording."), &record_btrace_cmdlist,
3147 "record btrace ", 0, &record_cmdlist);
3148 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3149
3150 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3151 _("\
3152 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3153 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3154 This format may not be available on all processors."),
3155 &record_btrace_cmdlist);
3156 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3157
3158 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3159 _("\
3160 Start branch trace recording in Intel Processor Trace format.\n\n\
3161 This format may not be available on all processors."),
3162 &record_btrace_cmdlist);
3163 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3164
3165 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3166 _("Set record options."), &set_record_btrace_cmdlist,
3167 "set record btrace ", 0, &set_record_cmdlist);
3168
3169 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3170 _("Show record options."), &show_record_btrace_cmdlist,
3171 "show record btrace ", 0, &show_record_cmdlist);
3172
3173 add_setshow_enum_cmd ("replay-memory-access", no_class,
3174 replay_memory_access_types, &replay_memory_access, _("\
3175 Set what memory accesses are allowed during replay."), _("\
3176 Show what memory accesses are allowed during replay."),
3177 _("Default is READ-ONLY.\n\n\
3178 The btrace record target does not trace data.\n\
3179 The memory therefore corresponds to the live target and not \
3180 to the current replay position.\n\n\
3181 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3182 When READ-WRITE, allow accesses to read-only and read-write memory during \
3183 replay."),
3184 NULL, cmd_show_replay_memory_access,
3185 &set_record_btrace_cmdlist,
3186 &show_record_btrace_cmdlist);
3187
3188 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3189 _("\
3190 Set the cpu to be used for trace decode.\n\n\
3191 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3192 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3193 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3194 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3195 When GDB does not support that cpu, this option can be used to enable\n\
3196 workarounds for a similar cpu that GDB supports.\n\n\
3197 When set to \"none\", errata workarounds are disabled."),
3198 &set_record_btrace_cpu_cmdlist,
3199 "set record btrace cpu ", 1,
3200 &set_record_btrace_cmdlist);
3201
3202 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3203 Automatically determine the cpu to be used for trace decode."),
3204 &set_record_btrace_cpu_cmdlist);
3205
3206 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3207 Do not enable errata workarounds for trace decode."),
3208 &set_record_btrace_cpu_cmdlist);
3209
3210 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3211 Show the cpu to be used for trace decode."),
3212 &show_record_btrace_cmdlist);
3213
3214 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3215 _("Set record btrace bts options."),
3216 &set_record_btrace_bts_cmdlist,
3217 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3218
3219 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3220 _("Show record btrace bts options."),
3221 &show_record_btrace_bts_cmdlist,
3222 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3223
3224 add_setshow_uinteger_cmd ("buffer-size", no_class,
3225 &record_btrace_conf.bts.size,
3226 _("Set the record/replay bts buffer size."),
3227 _("Show the record/replay bts buffer size."), _("\
3228 When starting recording request a trace buffer of this size. \
3229 The actual buffer size may differ from the requested size. \
3230 Use \"info record\" to see the actual buffer size.\n\n\
3231 Bigger buffers allow longer recording but also take more time to process \
3232 the recorded execution trace.\n\n\
3233 The trace buffer size may not be changed while recording."), NULL,
3234 show_record_bts_buffer_size_value,
3235 &set_record_btrace_bts_cmdlist,
3236 &show_record_btrace_bts_cmdlist);
3237
3238 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3239 _("Set record btrace pt options."),
3240 &set_record_btrace_pt_cmdlist,
3241 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3242
3243 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3244 _("Show record btrace pt options."),
3245 &show_record_btrace_pt_cmdlist,
3246 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3247
3248 add_setshow_uinteger_cmd ("buffer-size", no_class,
3249 &record_btrace_conf.pt.size,
3250 _("Set the record/replay pt buffer size."),
3251 _("Show the record/replay pt buffer size."), _("\
3252 Bigger buffers allow longer recording but also take more time to process \
3253 the recorded execution.\n\
3254 The actual buffer size may differ from the requested size. Use \"info record\" \
3255 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3256 &set_record_btrace_pt_cmdlist,
3257 &show_record_btrace_pt_cmdlist);
3258
3259 add_target (record_btrace_target_info, record_btrace_target_open);
3260
3261 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3262 xcalloc, xfree);
3263
3264 record_btrace_conf.bts.size = 64 * 1024;
3265 record_btrace_conf.pt.size = 16 * 1024;
3266 }
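
/* Example session (a sketch; values and output illustrative):

(gdb) set record btrace bts buffer-size 131072
(gdb) record btrace bts
(gdb) info record

"info record" reports the buffer size actually granted, which may
differ from the requested one. */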