/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "gdbsupport/event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"
#include "async-event.h"
#include <forward_list>

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size,
                          record_print_flags flags) override;
  void call_history_range (ULONGEST begin, ULONGEST end,
                           record_print_flags flags) override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
                         struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
                         enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                             \
  do                                                                    \
    {                                                                   \
      if (record_debug != 0)                                            \
        fprintf_unfiltered (gdb_stdlog,                                 \
                            "[record-btrace] " msg "\n", ##args);       \
    }                                                                   \
  while (0)

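/* For illustration: with "set debug record 1", a call such as

     DEBUG ("resume %s", target_pid_to_str (ptid).c_str ());

   prints "[record-btrace] resume <target id>" to gdb_stdlog.  */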

/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

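/* For illustration: "set record btrace cpu none" selects CS_NONE, so trace
   decode applies no cpu-specific errata workarounds; "set record btrace cpu
   auto" selects CS_AUTO, in which case this function returns nullptr and the
   cpu detected on the target is used instead.  */
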
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Ignore this thread if its inferior is not recorded by us.  */
  target_ops *rec = tp->inf->target_at (record_stratum);
  if (rec != &record_btrace_ops)
    return;

  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
                                     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace",
                                         format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

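/* Illustrative use, mirroring record_btrace_target_open below (THREADS and
   CONF are placeholders):

     scoped_btrace_disable disable;
     for (thread_info *tp : threads)
       {
         btrace_enable (tp, &conf);  // may throw part-way through
         disable.add_thread (tp);    // disable this one again on failure
       }
     disable.discard ();  // success: keep tracing enabled  */
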
/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
                                  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjust *SIZE to a human-readable value and return the corresponding
   unit suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

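/* For illustration: sizes that are exact multiples of a binary unit are
   scaled, e.g.

     unsigned int size = 2u << 20;
     const char *suffix = record_btrace_adjust_size (&size);
     // size == 2, suffix == "MB"

   while a size such as 3145 is left unchanged and gets an empty suffix.  */
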
/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp),
                     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

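/* For illustration: adding line 10 to an empty range yields [10, 11);
   adding line 8 afterwards extends the range to [8, 11).  */
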
/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
         introduced to the 'struct linetable_entry' structure.  This
         ensured that this loop maintained the same behaviour as before we
         introduced is_stmt.  That said, it might be that we would be
         better off not checking is_stmt here, this would lead to us
         possibly adding more line numbers to the range.  At the time this
         change was made I was unsure how to test this so chose to go with
         maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
          && (lines[i].is_stmt == 1))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last printed
   source line and the instructions corresponding to that source line.  When
   printing a new source line, we reset the currently open emitters and open
   new ones for that line.  If the source line range in LINES is not empty,
   this function leaves the emitters for the last printed source line open so
   instructions can be added to them.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
                    gdb::optional<ui_out_emit_list> *asm_list,
                    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &src_and_asm_tuple,
                                      &asm_list, flags);
                  last_lines = lines;
                }
              else if (!src_and_asm_tuple.has_value ())
                {
                  gdb_assert (!asm_list.has_value ());

                  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

                  /* No source information.  */
                  asm_list.emplace (uiout, "line_asm_insn");
                }

              gdb_assert (src_and_asm_tuple.has_value ());
              gdb_assert (asm_list.has_value ());
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (&dinsn, flags);
        }
    }
}

/* The insn_history method of target record-btrace.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

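/* For illustration: with no previous history, the request is anchored at the
   current position (the replay position, or the end of the trace) and covers
   SIZE instructions; once a history range exists, a positive SIZE continues
   forward from its end and a negative SIZE continues backwards from its
   beginning.  */
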
/* The insn_history_range method of target record-btrace.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
                                          gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
                                         gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}

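/* For illustration: FROM == 10 with SIZE == 10 requests the inclusive
   instruction range [10, 19]; FROM == 10 with SIZE == -10 requests
   [1, 10].  */
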
/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

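/* For illustration: a function segment with insn_offset 10 containing five
   instructions prints "10,14".  */
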
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)),
                       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text (" ");
        }

      if (sym != NULL)
        uiout->field_string ("function", sym->print_name (),
                             function_name_style.style ());
      else if (msym != NULL)
        uiout->field_string ("function", msym->print_name (),
                             function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??",
                             function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
                                          record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
                                         record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  thread_info *const tp = find_thread_ptid (proc_target, ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
                                    const char *annex, gdb_byte *readbuf,
                                    const gdb_byte *writebuf, ULONGEST offset,
                                    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (this, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_section_flags (section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
                                         offset, len, xfered_len);
}

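/* For illustration: while replaying with the default setting
   "set record btrace replay-memory-access read-only", reads of read-only
   sections such as .text are forwarded to the target beneath, whereas writes
   and reads of writable memory return TARGET_XFER_UNAVAILABLE.  */
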
/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt,
                                         enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  btrace_insn_iterator *replay = nullptr;

  /* Thread-db may ask for a thread's registers before GDB knows about the
     thread.  We forward the request to the target beneath in this
     case.  */
  thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  if (tp != nullptr)
    replay = tp->btrace.replay;

  if (replay != nullptr && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

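/* For illustration (addresses made up): if the caller's last traced
   instruction is a 5-byte call at 0x400100, the PC unwound for the caller
   frame is 0x400105, i.e. the return address; if the up link goes to a
   return (BFUN_UP_LINKS_TO_RET), the PC is the first instruction of the
   segment we returned to.  */
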
/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the contents
   of stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the get_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}

/* Implement the get_tailcall_unwinder method.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid).c_str (), flag,
         btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  bool executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (proc_target, inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (proc_target, inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (proc_target, inferior_ptid, executing);

  return id;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
        {
          unsigned int steps;

          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            error (_("No trace."));
        }

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2098
2099 /* Stop replaying a thread. */
2100
2101 static void
2102 record_btrace_stop_replaying (struct thread_info *tp)
2103 {
2104 struct btrace_thread_info *btinfo;
2105
2106 btinfo = &tp->btrace;
2107
2108 xfree (btinfo->replay);
2109 btinfo->replay = NULL;
2110
2111 /* Make sure we're not leaving any stale registers. */
2112 registers_changed_thread (tp);
2113 }
2114
2115 /* Stop replaying TP if it is at the end of its execution history. */
2116
2117 static void
2118 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2119 {
2120 struct btrace_insn_iterator *replay, end;
2121 struct btrace_thread_info *btinfo;
2122
2123 btinfo = &tp->btrace;
2124 replay = btinfo->replay;
2125
2126 if (replay == NULL)
2127 return;
2128
2129 btrace_insn_end (&end, btinfo);
2130
2131 if (btrace_insn_cmp (replay, &end) == 0)
2132 record_btrace_stop_replaying (tp);
2133 }
2134
2135 /* The resume method of target record-btrace. */
2136
2137 void
2138 record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
2139 {
2140 enum btrace_thread_flag flag, cflag;
2141
2142 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
2143 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
2144 step ? "step" : "cont");
2145
2146 /* Store the execution direction of the last resume.
2147
2148 If there is more than one resume call, we have to rely on infrun
2149 to not change the execution direction in-between. */
2150 record_btrace_resume_exec_dir = ::execution_direction;
2151
2152 /* As long as we're not replaying, just forward the request.
2153
2154 For non-stop targets this means that no thread is replaying. In order to
2155 make progress, we may need to explicitly move replaying threads to the end
2156 of their execution history. */
2157 if ((::execution_direction != EXEC_REVERSE)
2158 && !record_is_replaying (minus_one_ptid))
2159 {
2160 this->beneath ()->resume (ptid, step, signal);
2161 return;
2162 }
2163
2164 /* Compute the btrace thread flag for the requested move. */
2165 if (::execution_direction == EXEC_REVERSE)
2166 {
2167 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2168 cflag = BTHR_RCONT;
2169 }
2170 else
2171 {
2172 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2173 cflag = BTHR_CONT;
2174 }
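
  /* For illustration (an assumed all-stop scenario where INFERIOR_PTID is
     thread 1): "step" yields BTHR_STEP for thread 1 and BTHR_CONT for all
     other threads; "reverse-step" yields BTHR_RSTEP and BTHR_RCONT,
     respectively.  */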
2175
2176 /* We just indicate the resume intent here. The actual stepping happens in
2177 record_btrace_target::wait below.
2178
2179 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2180
2181 process_stratum_target *proc_target = current_inferior ()->process_target ();
2182
2183 if (!target_is_non_stop_p ())
2184 {
2185 gdb_assert (inferior_ptid.matches (ptid));
2186
2187 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2188 {
2189 if (tp->ptid.matches (inferior_ptid))
2190 record_btrace_resume_thread (tp, flag);
2191 else
2192 record_btrace_resume_thread (tp, cflag);
2193 }
2194 }
2195 else
2196 {
2197 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2198 record_btrace_resume_thread (tp, flag);
2199 }
2200
2201 /* Async support. */
2202 if (target_can_async_p ())
2203 {
2204 target_async (1);
2205 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2206 }
2207 }
2208
2209 /* The commit_resume method of target record-btrace. */
2210
2211 void
2212 record_btrace_target::commit_resume ()
2213 {
2214 if ((::execution_direction != EXEC_REVERSE)
2215 && !record_is_replaying (minus_one_ptid))
2216 beneath ()->commit_resume ();
2217 }
2218
2219 /* Cancel resuming TP. */
2220
2221 static void
2222 record_btrace_cancel_resume (struct thread_info *tp)
2223 {
2224 enum btrace_thread_flag flags;
2225
2226 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2227 if (flags == 0)
2228 return;
2229
2230 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2231 print_thread_id (tp),
2232 target_pid_to_str (tp->ptid).c_str (), flags,
2233 btrace_thread_flag_to_str (flags));
2234
2235 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2236 record_btrace_stop_replaying_at_end (tp);
2237 }
2238
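/* The btrace_step_* helpers below construct the target_waitstatus values
   that record_btrace_step_thread reports back to the wait method.  */
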
2239 /* Return a target_waitstatus indicating that we ran out of history. */
2240
2241 static struct target_waitstatus
2242 btrace_step_no_history (void)
2243 {
2244 struct target_waitstatus status;
2245
2246 status.kind = TARGET_WAITKIND_NO_HISTORY;
2247
2248 return status;
2249 }
2250
2251 /* Return a target_waitstatus indicating that a step finished. */
2252
2253 static struct target_waitstatus
2254 btrace_step_stopped (void)
2255 {
2256 struct target_waitstatus status;
2257
2258 status.kind = TARGET_WAITKIND_STOPPED;
2259 status.value.sig = GDB_SIGNAL_TRAP;
2260
2261 return status;
2262 }
2263
2264 /* Return a target_waitstatus indicating that a thread was stopped as
2265 requested. */
2266
2267 static struct target_waitstatus
2268 btrace_step_stopped_on_request (void)
2269 {
2270 struct target_waitstatus status;
2271
2272 status.kind = TARGET_WAITKIND_STOPPED;
2273 status.value.sig = GDB_SIGNAL_0;
2274
2275 return status;
2276 }
2277
2278 /* Return a target_waitstatus indicating a spurious stop. */
2279
2280 static struct target_waitstatus
2281 btrace_step_spurious (void)
2282 {
2283 struct target_waitstatus status;
2284
2285 status.kind = TARGET_WAITKIND_SPURIOUS;
2286
2287 return status;
2288 }
2289
2290 /* Return a target_waitstatus indicating that the thread was not resumed. */
2291
2292 static struct target_waitstatus
2293 btrace_step_no_resumed (void)
2294 {
2295 struct target_waitstatus status;
2296
2297 status.kind = TARGET_WAITKIND_NO_RESUMED;
2298
2299 return status;
2300 }
2301
2302 /* Return a target_waitstatus indicating that we should wait again. */
2303
2304 static struct target_waitstatus
2305 btrace_step_again (void)
2306 {
2307 struct target_waitstatus status;
2308
2309 status.kind = TARGET_WAITKIND_IGNORE;
2310
2311 return status;
2312 }
2313
2314 /* Clear the record histories. */
2315
2316 static void
2317 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2318 {
2319 xfree (btinfo->insn_history);
2320 xfree (btinfo->call_history);
2321
2322 btinfo->insn_history = NULL;
2323 btinfo->call_history = NULL;
2324 }
2325
2326 /* Check whether TP's current replay position is at a breakpoint. */
2327
2328 static int
2329 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2330 {
2331 struct btrace_insn_iterator *replay;
2332 struct btrace_thread_info *btinfo;
2333 const struct btrace_insn *insn;
2334
2335 btinfo = &tp->btrace;
2336 replay = btinfo->replay;
2337
2338 if (replay == NULL)
2339 return 0;
2340
2341 insn = btrace_insn_get (replay);
2342 if (insn == NULL)
2343 return 0;
2344
2345 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
2346 &btinfo->stop_reason);
2347 }
2348
2349 /* Step one instruction in forward direction. */
2350
2351 static struct target_waitstatus
2352 record_btrace_single_step_forward (struct thread_info *tp)
2353 {
2354 struct btrace_insn_iterator *replay, end, start;
2355 struct btrace_thread_info *btinfo;
2356
2357 btinfo = &tp->btrace;
2358 replay = btinfo->replay;
2359
2360 /* We're done if we're not replaying. */
2361 if (replay == NULL)
2362 return btrace_step_no_history ();
2363
2364 /* Check if we're stepping a breakpoint. */
2365 if (record_btrace_replay_at_breakpoint (tp))
2366 return btrace_step_stopped ();
2367
2368 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2369 jump back to the instruction at which we started. */
2370 start = *replay;
2371 do
2372 {
2373 unsigned int steps;
2374
2375 /* We will bail out here if we continue stepping after reaching the end
2376 of the execution history. */
2377 steps = btrace_insn_next (replay, 1);
2378 if (steps == 0)
2379 {
2380 *replay = start;
2381 return btrace_step_no_history ();
2382 }
2383 }
2384 while (btrace_insn_get (replay) == NULL);
2385
2386 /* Determine the end of the instruction trace. */
2387 btrace_insn_end (&end, btinfo);
2388
2389 /* The execution trace contains (and ends with) the current instruction.
2390 This instruction has not been executed, yet, so the trace really ends
2391 one instruction earlier. */
2392 if (btrace_insn_cmp (replay, &end) == 0)
2393 return btrace_step_no_history ();
2394
2395 return btrace_step_spurious ();
2396 }
2397
2398 /* Step one instruction in backward direction. */
2399
2400 static struct target_waitstatus
2401 record_btrace_single_step_backward (struct thread_info *tp)
2402 {
2403 struct btrace_insn_iterator *replay, start;
2404 struct btrace_thread_info *btinfo;
2405
2406 btinfo = &tp->btrace;
2407 replay = btinfo->replay;
2408
2409 /* Start replaying if we're not already doing so. */
2410 if (replay == NULL)
2411 replay = record_btrace_start_replaying (tp);
2412
2413 /* Skip gaps during replay.  If we can't step any further, we've reached
2414 the end of the history.  If we end up at a gap (at the beginning of
2415 the trace), jump back to the instruction at which we started. */
2416 start = *replay;
2417 do
2418 {
2419 unsigned int steps;
2420
2421 steps = btrace_insn_prev (replay, 1);
2422 if (steps == 0)
2423 {
2424 *replay = start;
2425 return btrace_step_no_history ();
2426 }
2427 }
2428 while (btrace_insn_get (replay) == NULL);
2429
2430 /* Check if we're stepping a breakpoint.
2431
2432 For reverse-stepping, this check is after the step. There is logic in
2433 infrun.c that handles reverse-stepping separately. See, for example,
2434 proceed and adjust_pc_after_break.
2435
2436 This code assumes that for reverse-stepping, PC points to the last
2437 de-executed instruction, whereas for forward-stepping PC points to the
2438 next to-be-executed instruction. */
2439 if (record_btrace_replay_at_breakpoint (tp))
2440 return btrace_step_stopped ();
2441
2442 return btrace_step_spurious ();
2443 }
2444
2445 /* Step a single thread. */
2446
2447 static struct target_waitstatus
2448 record_btrace_step_thread (struct thread_info *tp)
2449 {
2450 struct btrace_thread_info *btinfo;
2451 struct target_waitstatus status;
2452 enum btrace_thread_flag flags;
2453
2454 btinfo = &tp->btrace;
2455
2456 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2457 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2458
2459 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2460 target_pid_to_str (tp->ptid).c_str (), flags,
2461 btrace_thread_flag_to_str (flags));
2462
2463 /* We can't step without an execution history. */
2464 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2465 return btrace_step_no_history ();
2466
2467 switch (flags)
2468 {
2469 default:
2470 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2471
2472 case BTHR_STOP:
2473 return btrace_step_stopped_on_request ();
2474
2475 case BTHR_STEP:
2476 status = record_btrace_single_step_forward (tp);
2477 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2478 break;
2479
2480 return btrace_step_stopped ();
2481
2482 case BTHR_RSTEP:
2483 status = record_btrace_single_step_backward (tp);
2484 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2485 break;
2486
2487 return btrace_step_stopped ();
2488
2489 case BTHR_CONT:
2490 status = record_btrace_single_step_forward (tp);
2491 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2492 break;
2493
2494 btinfo->flags |= flags;
2495 return btrace_step_again ();
2496
2497 case BTHR_RCONT:
2498 status = record_btrace_single_step_backward (tp);
2499 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2500 break;
2501
2502 btinfo->flags |= flags;
2503 return btrace_step_again ();
2504 }
2505
2506 /* We keep threads moving at the end of their execution history. The wait
2507 method will stop the thread for which the event is reported. */
2508 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2509 btinfo->flags |= flags;
2510
2511 return status;
2512 }
2513
2514 /* Announce further events if necessary. */
2515
2516 static void
2517 record_btrace_maybe_mark_async_event
2518 (const std::vector<thread_info *> &moving,
2519 const std::vector<thread_info *> &no_history)
2520 {
2521 bool more_moving = !moving.empty ();
2522 bool more_no_history = !no_history.empty ();
2523
2524 if (!more_moving && !more_no_history)
2525 return;
2526
2527 if (more_moving)
2528 DEBUG ("movers pending");
2529
2530 if (more_no_history)
2531 DEBUG ("no-history pending");
2532
2533 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2534 }
2535
2536 /* The wait method of target record-btrace. */
2537
2538 ptid_t
2539 record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2540 int options)
2541 {
2542 std::vector<thread_info *> moving;
2543 std::vector<thread_info *> no_history;
2544
2545 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
2546
2547 /* As long as we're not replaying, just forward the request. */
2548 if ((::execution_direction != EXEC_REVERSE)
2549 && !record_is_replaying (minus_one_ptid))
2550 {
2551 return this->beneath ()->wait (ptid, status, options);
2552 }
2553
2554 /* Keep a work list of moving threads. */
2555 process_stratum_target *proc_target = current_inferior ()->process_target ();
2556 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2557 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2558 moving.push_back (tp);
2559
2560 if (moving.empty ())
2561 {
2562 *status = btrace_step_no_resumed ();
2563
2564 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
2565 target_waitstatus_to_string (status).c_str ());
2566
2567 return null_ptid;
2568 }
2569
2570 /* Step moving threads one by one, one step each, until either one thread
2571 reports an event or we run out of threads to step.
2572
2573 When stepping more than one thread, chances are that some threads reach
2574 the end of their execution history earlier than others. If we reported
2575 this immediately, all-stop on top of non-stop would stop all threads and
2576 resume the same threads next time. And we would report the same thread
2577 having reached the end of its execution history again.
2578
2579 In the worst case, this would starve the other threads. But even if other
2580 threads would be allowed to make progress, this would result in far too
2581 many intermediate stops.
2582
2583 We therefore delay the reporting of "no execution history" until we have
2584 nothing else to report. By this time, all threads should have moved to
2585 either the beginning or the end of their execution history. There will
2586 be a single user-visible stop. */
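
  /* For example (a hypothetical schedule): if threads A and B are both
     continuing in reverse and A reaches the beginning of its history while
     B later hits a breakpoint, A is parked in NO_HISTORY and B's stop is
     reported.  A's "no history" stop would be reported only if no other
     thread had an event to report.  */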
2587 struct thread_info *eventing = NULL;
2588 while ((eventing == NULL) && !moving.empty ())
2589 {
2590 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
2591 {
2592 thread_info *tp = moving[ix];
2593
2594 *status = record_btrace_step_thread (tp);
2595
2596 switch (status->kind)
2597 {
2598 case TARGET_WAITKIND_IGNORE:
2599 ix++;
2600 break;
2601
2602 case TARGET_WAITKIND_NO_HISTORY:
2603 no_history.push_back (ordered_remove (moving, ix));
2604 break;
2605
2606 default:
2607 eventing = unordered_remove (moving, ix);
2608 break;
2609 }
2610 }
2611 }
2612
2613 if (eventing == NULL)
2614 {
2615 /* We started with at least one moving thread. This thread must have
2616 either stopped or reached the end of its execution history.
2617
2618 In the former case, EVENTING must not be NULL.
2619 In the latter case, NO_HISTORY must not be empty. */
2620 gdb_assert (!no_history.empty ());
2621
2622 /* We kept threads moving at the end of their execution history. Stop
2623 EVENTING now that we are going to report its stop. */
2624 eventing = unordered_remove (no_history, 0);
2625 eventing->btrace.flags &= ~BTHR_MOVE;
2626
2627 *status = btrace_step_no_history ();
2628 }
2629
2630 gdb_assert (eventing != NULL);
2631
2632 /* We kept threads replaying at the end of their execution history. Stop
2633 replaying EVENTING now that we are going to report its stop. */
2634 record_btrace_stop_replaying_at_end (eventing);
2635
2636 /* Stop all other threads. */
2637 if (!target_is_non_stop_p ())
2638 {
2639 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2640 record_btrace_cancel_resume (tp);
2641 }
2642
2643 /* In async mode, we need to announce further events. */
2644 if (target_is_async_p ())
2645 record_btrace_maybe_mark_async_event (moving, no_history);
2646
2647 /* Start record histories anew from the current position. */
2648 record_btrace_clear_histories (&eventing->btrace);
2649
2650 /* We moved the replay position but did not update registers. */
2651 registers_changed_thread (eventing);
2652
2653 DEBUG ("wait ended by thread %s (%s): %s",
2654 print_thread_id (eventing),
2655 target_pid_to_str (eventing->ptid).c_str (),
2656 target_waitstatus_to_string (status).c_str ());
2657
2658 return eventing->ptid;
2659 }
2660
2661 /* The stop method of target record-btrace. */
2662
2663 void
2664 record_btrace_target::stop (ptid_t ptid)
2665 {
2666 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
2667
2668 /* As long as we're not replaying, just forward the request. */
2669 if ((::execution_direction != EXEC_REVERSE)
2670 && !record_is_replaying (minus_one_ptid))
2671 {
2672 this->beneath ()->stop (ptid);
2673 }
2674 else
2675 {
2676 process_stratum_target *proc_target
2677 = current_inferior ()->process_target ();
2678
2679 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
2680 {
2681 tp->btrace.flags &= ~BTHR_MOVE;
2682 tp->btrace.flags |= BTHR_STOP;
2683 }
2684 }
2685 }
2686
2687 /* The can_execute_reverse method of target record-btrace. */
2688
2689 bool
2690 record_btrace_target::can_execute_reverse ()
2691 {
2692 return true;
2693 }
2694
2695 /* The stopped_by_sw_breakpoint method of target record-btrace. */
2696
2697 bool
2698 record_btrace_target::stopped_by_sw_breakpoint ()
2699 {
2700 if (record_is_replaying (minus_one_ptid))
2701 {
2702 struct thread_info *tp = inferior_thread ();
2703
2704 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2705 }
2706
2707 return this->beneath ()->stopped_by_sw_breakpoint ();
2708 }
2709
2710 /* The supports_stopped_by_sw_breakpoint method of target
2711 record-btrace. */
2712
2713 bool
2714 record_btrace_target::supports_stopped_by_sw_breakpoint ()
2715 {
2716 if (record_is_replaying (minus_one_ptid))
2717 return true;
2718
2719 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
2720 }
2721
2722 /* The stopped_by_hw_breakpoint method of target record-btrace. */
2723
2724 bool
2725 record_btrace_target::stopped_by_hw_breakpoint ()
2726 {
2727 if (record_is_replaying (minus_one_ptid))
2728 {
2729 struct thread_info *tp = inferior_thread ();
2730
2731 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2732 }
2733
2734 return this->beneath ()->stopped_by_hw_breakpoint ();
2735 }
2736
2737 /* The supports_stopped_by_hw_breakpoint method of target
2738 record-btrace. */
2739
2740 bool
2741 record_btrace_target::supports_stopped_by_hw_breakpoint ()
2742 {
2743 if (record_is_replaying (minus_one_ptid))
2744 return true;
2745
2746 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
2747 }
2748
2749 /* The update_thread_list method of target record-btrace. */
2750
2751 void
2752 record_btrace_target::update_thread_list ()
2753 {
2754 /* We don't add or remove threads during replay. */
2755 if (record_is_replaying (minus_one_ptid))
2756 return;
2757
2758 /* Forward the request. */
2759 this->beneath ()->update_thread_list ();
2760 }
2761
2762 /* The thread_alive method of target record-btrace. */
2763
2764 bool
2765 record_btrace_target::thread_alive (ptid_t ptid)
2766 {
2767 /* We don't add or remove threads during replay. */
2768 if (record_is_replaying (minus_one_ptid))
2769 return true;
2770
2771 /* Forward the request. */
2772 return this->beneath ()->thread_alive (ptid);
2773 }
2774
2775 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2776 is stopped. */
2777
2778 static void
2779 record_btrace_set_replay (struct thread_info *tp,
2780 const struct btrace_insn_iterator *it)
2781 {
2782 struct btrace_thread_info *btinfo;
2783
2784 btinfo = &tp->btrace;
2785
2786 if (it == NULL)
2787 record_btrace_stop_replaying (tp);
2788 else
2789 {
2790 if (btinfo->replay == NULL)
2791 record_btrace_start_replaying (tp);
2792 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2793 return;
2794
2795 *btinfo->replay = *it;
2796 registers_changed_thread (tp);
2797 }
2798
2799 /* Start anew from the new replay position. */
2800 record_btrace_clear_histories (btinfo);
2801
2802 inferior_thread ()->suspend.stop_pc
2803 = regcache_read_pc (get_current_regcache ());
2804 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2805 }
2806
2807 /* The goto_record_begin method of target record-btrace. */
2808
2809 void
2810 record_btrace_target::goto_record_begin ()
2811 {
2812 struct thread_info *tp;
2813 struct btrace_insn_iterator begin;
2814
2815 tp = require_btrace_thread ();
2816
2817 btrace_insn_begin (&begin, &tp->btrace);
2818
2819 /* Skip gaps at the beginning of the trace. */
2820 while (btrace_insn_get (&begin) == NULL)
2821 {
2822 unsigned int steps;
2823
2824 steps = btrace_insn_next (&begin, 1);
2825 if (steps == 0)
2826 error (_("No trace."));
2827 }
2828
2829 record_btrace_set_replay (tp, &begin);
2830 }
2831
2832 /* The goto_record_end method of target record-btrace. */
2833
2834 void
2835 record_btrace_target::goto_record_end ()
2836 {
2837 struct thread_info *tp;
2838
2839 tp = require_btrace_thread ();
2840
2841 record_btrace_set_replay (tp, NULL);
2842 }
2843
2844 /* The goto_record method of target record-btrace. */
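/* For example, "record goto 23" moves the replay position to the
   instruction numbered 23 in the "record instruction-history" output
   (illustrative value).  */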
2845
2846 void
2847 record_btrace_target::goto_record (ULONGEST insn)
2848 {
2849 struct thread_info *tp;
2850 struct btrace_insn_iterator it;
2851 unsigned int number;
2852 int found;
2853
2854 number = insn;
2855
2856 /* Check for wrap-arounds. */
2857 if (number != insn)
2858 error (_("Instruction number out of range."));
2859
2860 tp = require_btrace_thread ();
2861
2862 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2863
2864 /* Check if the instruction could not be found or is a gap. */
2865 if (found == 0 || btrace_insn_get (&it) == NULL)
2866 error (_("No such instruction."));
2867
2868 record_btrace_set_replay (tp, &it);
2869 }
2870
2871 /* The record_stop_replaying method of target record-btrace. */
2872
2873 void
2874 record_btrace_target::record_stop_replaying ()
2875 {
2876 for (thread_info *tp : current_inferior ()->non_exited_threads ())
2877 record_btrace_stop_replaying (tp);
2878 }
2879
2880 /* The execution_direction target method. */
2881
2882 enum exec_direction_kind
2883 record_btrace_target::execution_direction ()
2884 {
2885 return record_btrace_resume_exec_dir;
2886 }
2887
2888 /* The prepare_to_generate_core target method. */
2889
2890 void
2891 record_btrace_target::prepare_to_generate_core ()
2892 {
2893 record_btrace_generating_corefile = 1;
2894 }
2895
2896 /* The done_generating_core target method. */
2897
2898 void
2899 record_btrace_target::done_generating_core ()
2900 {
2901 record_btrace_generating_corefile = 0;
2902 }
2903
2904 /* Start recording in BTS format. */
2905
2906 static void
2907 cmd_record_btrace_bts_start (const char *args, int from_tty)
2908 {
2909 if (args != NULL && *args != 0)
2910 error (_("Invalid argument."));
2911
2912 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2913
2914 try
2915 {
2916 execute_command ("target record-btrace", from_tty);
2917 }
2918 catch (const gdb_exception &exception)
2919 {
2920 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2921 throw;
2922 }
2923 }
2924
2925 /* Start recording in Intel Processor Trace format. */
2926
2927 static void
2928 cmd_record_btrace_pt_start (const char *args, int from_tty)
2929 {
2930 if (args != NULL && *args != 0)
2931 error (_("Invalid argument."));
2932
2933 record_btrace_conf.format = BTRACE_FORMAT_PT;
2934
2935 try
2936 {
2937 execute_command ("target record-btrace", from_tty);
2938 }
2939 catch (const gdb_exception &exception)
2940 {
2941 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2942 throw;
2943 }
2944 }
2945
2946 /* The "record btrace" command.  Tries Intel PT first and falls back to BTS. */
2947
2948 static void
2949 cmd_record_btrace_start (const char *args, int from_tty)
2950 {
2951 if (args != NULL && *args != 0)
2952 error (_("Invalid argument."));
2953
2954 record_btrace_conf.format = BTRACE_FORMAT_PT;
2955
2956 try
2957 {
2958 execute_command ("target record-btrace", from_tty);
2959 }
2960 catch (const gdb_exception &exception)
2961 {
2962 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2963
2964 try
2965 {
2966 execute_command ("target record-btrace", from_tty);
2967 }
2968 catch (const gdb_exception &ex)
2969 {
2970 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2971 throw;
2972 }
2973 }
2974 }
2975
2976 /* The "show record btrace replay-memory-access" command. */
2977
2978 static void
2979 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2980 struct cmd_list_element *c, const char *value)
2981 {
2982 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2983 replay_memory_access);
2984 }
2985
2986 /* The "set record btrace cpu none" command. */
2987
2988 static void
2989 cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2990 {
2991 if (args != nullptr && *args != 0)
2992 error (_("Trailing junk: '%s'."), args);
2993
2994 record_btrace_cpu_state = CS_NONE;
2995 }
2996
2997 /* The "set record btrace cpu auto" command. */
2998
2999 static void
3000 cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3001 {
3002 if (args != nullptr && *args != 0)
3003 error (_("Trailing junk: '%s'."), args);
3004
3005 record_btrace_cpu_state = CS_AUTO;
3006 }
3007
3008 /* The "set record btrace cpu" command. */
3009
3010 static void
3011 cmd_set_record_btrace_cpu (const char *args, int from_tty)
3012 {
3013 if (args == nullptr)
3014 args = "";
3015
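  /* Example inputs (illustrative values): "intel: 6/158" selects family 6,
     model 158, stepping 0; "intel: 6/158/10" also sets the stepping.  */
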
3016 /* We use a hard-coded vendor string for now. */
3017 unsigned int family, model, stepping;
3018 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3019 &model, &l1, &stepping, &l2);
3020 if (matches == 3)
3021 {
3022 if (strlen (args) != l2)
3023 error (_("Trailing junk: '%s'."), args + l2);
3024 }
3025 else if (matches == 2)
3026 {
3027 if (strlen (args) != l1)
3028 error (_("Trailing junk: '%s'."), args + l1);
3029
3030 stepping = 0;
3031 }
3032 else
3033 error (_("Bad format. See \"help set record btrace cpu\"."));
3034
3035 if (USHRT_MAX < family)
3036 error (_("Cpu family too big."));
3037
3038 if (UCHAR_MAX < model)
3039 error (_("Cpu model too big."));
3040
3041 if (UCHAR_MAX < stepping)
3042 error (_("Cpu stepping too big."));
3043
3044 record_btrace_cpu.vendor = CV_INTEL;
3045 record_btrace_cpu.family = family;
3046 record_btrace_cpu.model = model;
3047 record_btrace_cpu.stepping = stepping;
3048
3049 record_btrace_cpu_state = CS_CPU;
3050 }
3051
3052 /* The "show record btrace cpu" command. */
3053
3054 static void
3055 cmd_show_record_btrace_cpu (const char *args, int from_tty)
3056 {
3057 if (args != nullptr && *args != 0)
3058 error (_("Trailing junk: '%s'."), args);
3059
3060 switch (record_btrace_cpu_state)
3061 {
3062 case CS_AUTO:
3063 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3064 return;
3065
3066 case CS_NONE:
3067 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3068 return;
3069
3070 case CS_CPU:
3071 switch (record_btrace_cpu.vendor)
3072 {
3073 case CV_INTEL:
3074 if (record_btrace_cpu.stepping == 0)
3075 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3076 record_btrace_cpu.family,
3077 record_btrace_cpu.model);
3078 else
3079 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3080 record_btrace_cpu.family,
3081 record_btrace_cpu.model,
3082 record_btrace_cpu.stepping);
3083 return;
3084 }
3085 }
3086
3087 error (_("Internal error: bad cpu state."));
3088 }
3089
3090 /* The "record bts buffer-size" show value function. */
3091
3092 static void
3093 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3094 struct cmd_list_element *c,
3095 const char *value)
3096 {
3097 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3098 value);
3099 }
3100
3101 /* The "record pt buffer-size" show value function. */
3102
3103 static void
3104 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3105 struct cmd_list_element *c,
3106 const char *value)
3107 {
3108 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3109 value);
3110 }
3111
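/* A sample session using the commands registered below (illustrative
   only; availability depends on processor and kernel support):

     (gdb) record btrace pt
     (gdb) continue
     (gdb) reverse-stepi
     (gdb) info record  */
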
3112 /* Initialize btrace commands. */
3113
3114 void _initialize_record_btrace ();
3115 void
3116 _initialize_record_btrace ()
3117 {
3118 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3119 _("Start branch trace recording."), &record_btrace_cmdlist,
3120 "record btrace ", 0, &record_cmdlist);
3121 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3122
3123 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3124 _("\
3125 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3126 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3127 This format may not be available on all processors."),
3128 &record_btrace_cmdlist);
3129 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3130
3131 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3132 _("\
3133 Start branch trace recording in Intel Processor Trace format.\n\n\
3134 This format may not be available on all processors."),
3135 &record_btrace_cmdlist);
3136 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3137
3138 add_basic_prefix_cmd ("btrace", class_support,
3139 _("Set record options."), &set_record_btrace_cmdlist,
3140 "set record btrace ", 0, &set_record_cmdlist);
3141
3142 add_show_prefix_cmd ("btrace", class_support,
3143 _("Show record options."), &show_record_btrace_cmdlist,
3144 "show record btrace ", 0, &show_record_cmdlist);
3145
3146 add_setshow_enum_cmd ("replay-memory-access", no_class,
3147 replay_memory_access_types, &replay_memory_access, _("\
3148 Set what memory accesses are allowed during replay."), _("\
3149 Show what memory accesses are allowed during replay."),
3150 _("Default is READ-ONLY.\n\n\
3151 The btrace record target does not trace data.\n\
3152 The memory therefore corresponds to the live target and not \
3153 to the current replay position.\n\n\
3154 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3155 When READ-WRITE, allow accesses to read-only and read-write memory during \
3156 replay."),
3157 NULL, cmd_show_replay_memory_access,
3158 &set_record_btrace_cmdlist,
3159 &show_record_btrace_cmdlist);
3160
3161 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3162 _("\
3163 Set the cpu to be used for trace decode.\n\n\
3164 The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3165 For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
3166 When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3167 The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3168 When GDB does not support that cpu, this option can be used to enable\n\
3169 workarounds for a similar cpu that GDB supports.\n\n\
3170 When set to \"none\", errata workarounds are disabled."),
3171 &set_record_btrace_cpu_cmdlist,
3172 "set record btrace cpu ", 1,
3173 &set_record_btrace_cmdlist);
3174
3175 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3176 Automatically determine the cpu to be used for trace decode."),
3177 &set_record_btrace_cpu_cmdlist);
3178
3179 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3180 Do not enable errata workarounds for trace decode."),
3181 &set_record_btrace_cpu_cmdlist);
3182
3183 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3184 Show the cpu to be used for trace decode."),
3185 &show_record_btrace_cmdlist);
3186
3187 add_basic_prefix_cmd ("bts", class_support,
3188 _("Set record btrace bts options."),
3189 &set_record_btrace_bts_cmdlist,
3190 "set record btrace bts ", 0,
3191 &set_record_btrace_cmdlist);
3192
3193 add_show_prefix_cmd ("bts", class_support,
3194 _("Show record btrace bts options."),
3195 &show_record_btrace_bts_cmdlist,
3196 "show record btrace bts ", 0,
3197 &show_record_btrace_cmdlist);
3198
3199 add_setshow_uinteger_cmd ("buffer-size", no_class,
3200 &record_btrace_conf.bts.size,
3201 _("Set the record/replay bts buffer size."),
3202 _("Show the record/replay bts buffer size."), _("\
3203 When starting recording, request a trace buffer of this size. \
3204 The actual buffer size may differ from the requested size. \
3205 Use \"info record\" to see the actual buffer size.\n\n\
3206 Bigger buffers allow longer recording but also take more time to process \
3207 the recorded execution trace.\n\n\
3208 The trace buffer size may not be changed while recording."), NULL,
3209 show_record_bts_buffer_size_value,
3210 &set_record_btrace_bts_cmdlist,
3211 &show_record_btrace_bts_cmdlist);
3212
3213 add_basic_prefix_cmd ("pt", class_support,
3214 _("Set record btrace pt options."),
3215 &set_record_btrace_pt_cmdlist,
3216 "set record btrace pt ", 0,
3217 &set_record_btrace_cmdlist);
3218
3219 add_show_prefix_cmd ("pt", class_support,
3220 _("Show record btrace pt options."),
3221 &show_record_btrace_pt_cmdlist,
3222 "show record btrace pt ", 0,
3223 &show_record_btrace_cmdlist);
3224
3225 add_setshow_uinteger_cmd ("buffer-size", no_class,
3226 &record_btrace_conf.pt.size,
3227 _("Set the record/replay pt buffer size."),
3228 _("Show the record/replay pt buffer size."), _("\
3229 Bigger buffers allow longer recording but also take more time to process \
3230 the recorded execution.\n\
3231 The actual buffer size may differ from the requested size. Use \"info record\" \
3232 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3233 &set_record_btrace_pt_cmdlist,
3234 &show_record_btrace_pt_cmdlist);
3235
3236 add_target (record_btrace_target_info, record_btrace_target_open);
3237
3238 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3239 xcalloc, xfree);
3240
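  /* Default requested buffer sizes: 64 KiB for BTS and 16 KiB for PT.  As
     the help text above notes, the actual size may differ; use "info
     record" to check.  */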
3241 record_btrace_conf.bts.size = 64 * 1024;
3242 record_btrace_conf.pt.size = 16 * 1024;
3243 }