afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
42a4f53d 3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
0747795c 41#include "common/vec.h"
00431a78 42#include "inferior.h"
325fac50 43#include <algorithm>
afedecd3 44
d9f719f1
PA
45static const target_info record_btrace_target_info = {
46 "record-btrace",
47 N_("Branch tracing target"),
48 N_("Collect control-flow trace and provide the execution history.")
49};
50
afedecd3 51/* The target_ops of record-btrace. */
f6ac5f3d
PA
52
53class record_btrace_target final : public target_ops
54{
55public:
d9f719f1
PA
56 const target_info &info () const override
57 { return record_btrace_target_info; }
f6ac5f3d 58
66b4deae
PA
59 strata stratum () const override { return record_stratum; }
60
f6ac5f3d
PA
61 void close () override;
62 void async (int) override;
63
64 void detach (inferior *inf, int from_tty) override
65 { record_detach (this, inf, from_tty); }
66
67 void disconnect (const char *, int) override;
68
69 void mourn_inferior () override
70 { record_mourn_inferior (this); }
71
72 void kill () override
73 { record_kill (this); }
74
75 enum record_method record_method (ptid_t ptid) override;
76
77 void stop_recording () override;
78 void info_record () override;
79
80 void insn_history (int size, gdb_disassembly_flags flags) override;
81 void insn_history_from (ULONGEST from, int size,
82 gdb_disassembly_flags flags) override;
83 void insn_history_range (ULONGEST begin, ULONGEST end,
84 gdb_disassembly_flags flags) override;
85 void call_history (int size, record_print_flags flags) override;
86 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
87 override;
88 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
89 override;
90
57810aa7
PA
91 bool record_is_replaying (ptid_t ptid) override;
92 bool record_will_replay (ptid_t ptid, int dir) override;
f6ac5f3d
PA
93 void record_stop_replaying () override;
94
95 enum target_xfer_status xfer_partial (enum target_object object,
96 const char *annex,
97 gdb_byte *readbuf,
98 const gdb_byte *writebuf,
99 ULONGEST offset, ULONGEST len,
100 ULONGEST *xfered_len) override;
101
102 int insert_breakpoint (struct gdbarch *,
103 struct bp_target_info *) override;
104 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
105 enum remove_bp_reason) override;
106
107 void fetch_registers (struct regcache *, int) override;
108
109 void store_registers (struct regcache *, int) override;
110 void prepare_to_store (struct regcache *) override;
111
112 const struct frame_unwind *get_unwinder () override;
113
114 const struct frame_unwind *get_tailcall_unwinder () override;
115
116 void commit_resume () override;
117 void resume (ptid_t, int, enum gdb_signal) override;
118 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
119
120 void stop (ptid_t) override;
121 void update_thread_list () override;
57810aa7 122 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
123 void goto_record_begin () override;
124 void goto_record_end () override;
125 void goto_record (ULONGEST insn) override;
126
57810aa7 127 bool can_execute_reverse () override;
f6ac5f3d 128
57810aa7
PA
129 bool stopped_by_sw_breakpoint () override;
130 bool supports_stopped_by_sw_breakpoint () override;
f6ac5f3d 131
57810aa7
PA
132 bool stopped_by_hw_breakpoint () override;
133 bool supports_stopped_by_hw_breakpoint () override;
f6ac5f3d
PA
134
135 enum exec_direction_kind execution_direction () override;
136 void prepare_to_generate_core () override;
137 void done_generating_core () override;
138};
139
140static record_btrace_target record_btrace_ops;
141
142/* Initialize the record-btrace target ops. */
afedecd3 143
76727919
TT
144/* Token associated with a new-thread observer enabling branch tracing
145 for the new thread. */
3dcfdc58 146static const gdb::observers::token record_btrace_thread_observer_token {};
afedecd3 147
67b5c0c1
MM
148/* Memory access types used in set/show record btrace replay-memory-access. */
149static const char replay_memory_access_read_only[] = "read-only";
150static const char replay_memory_access_read_write[] = "read-write";
151static const char *const replay_memory_access_types[] =
152{
153 replay_memory_access_read_only,
154 replay_memory_access_read_write,
155 NULL
156};
157
158/* The currently allowed replay memory access type. */
159static const char *replay_memory_access = replay_memory_access_read_only;
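/* The access mode is changed with the "set record btrace replay-memory-access"
   command named in the comment above, e.g.:

     (gdb) set record btrace replay-memory-access read-write

   With the default "read-only" mode, memory writes during replay are refused;
   see the xfer_partial method below.  */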
160
4a4495d6
MM
161/* The cpu state kinds. */
162enum record_btrace_cpu_state_kind
163{
164 CS_AUTO,
165 CS_NONE,
166 CS_CPU
167};
168
169/* The current cpu state. */
170static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
171
172/* The current cpu for trace decode. */
173static struct btrace_cpu record_btrace_cpu;
174
67b5c0c1
MM
175/* Command lists for "set/show record btrace". */
176static struct cmd_list_element *set_record_btrace_cmdlist;
177static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 178
70ad5bff
MM
179/* The execution direction of the last resume we got. See record-full.c. */
180static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
181
182/* The async event handler for reverse/replay execution. */
183static struct async_event_handler *record_btrace_async_inferior_event_handler;
184
aef92902
MM
185/* A flag indicating that we are currently generating a core file. */
186static int record_btrace_generating_corefile;
187
f4abbc16
MM
188/* The current branch trace configuration. */
189static struct btrace_config record_btrace_conf;
190
191/* Command list for "record btrace". */
192static struct cmd_list_element *record_btrace_cmdlist;
193
d33501a5
MM
194/* Command lists for "set/show record btrace bts". */
195static struct cmd_list_element *set_record_btrace_bts_cmdlist;
196static struct cmd_list_element *show_record_btrace_bts_cmdlist;
197
b20a6524
MM
198/* Command lists for "set/show record btrace pt". */
199static struct cmd_list_element *set_record_btrace_pt_cmdlist;
200static struct cmd_list_element *show_record_btrace_pt_cmdlist;
201
4a4495d6
MM
202/* Command list for "set record btrace cpu". */
203static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
204
afedecd3
MM
205/* Print a record-btrace debug message. Use do ... while (0) to avoid
206 ambiguities when used in if statements. */
207
208#define DEBUG(msg, args...) \
209 do \
210 { \
211 if (record_debug != 0) \
212 fprintf_unfiltered (gdb_stdlog, \
213 "[record-btrace] " msg "\n", ##args); \
214 } \
215 while (0)
216
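/* For example (mirroring uses further down in this file), with record
   debugging enabled via "set debug record", a call such as

     DEBUG ("resuming thread %s", print_thread_id (tp));

   writes "[record-btrace] resuming thread ..." to gdb_stdlog.  The
   do ... while (0) wrapper keeps the macro safe inside unbraced
   if/else statements.  */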
217
4a4495d6
MM
218/* Return the cpu configured by the user. Returns NULL if the cpu was
219 configured as auto. */
220const struct btrace_cpu *
221record_btrace_get_cpu (void)
222{
223 switch (record_btrace_cpu_state)
224 {
225 case CS_AUTO:
226 return nullptr;
227
228 case CS_NONE:
229 record_btrace_cpu.vendor = CV_UNKNOWN;
230 /* Fall through. */
231 case CS_CPU:
232 return &record_btrace_cpu;
233 }
234
235 error (_("Internal error: bad record btrace cpu state."));
236}
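/* Callers below pass the result straight to btrace_fetch; a nullptr result
   means the cpu used for trace decode is determined automatically.  */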
237
afedecd3 238/* Update the branch trace for the current thread and return a pointer to its
066ce621 239 thread_info.
afedecd3
MM
240
241 Throws an error if there is no thread or no trace. This function never
242 returns NULL. */
243
066ce621
MM
244static struct thread_info *
245require_btrace_thread (void)
afedecd3 246{
afedecd3
MM
247 DEBUG ("require");
248
00431a78 249 if (inferior_ptid == null_ptid)
afedecd3
MM
250 error (_("No thread."));
251
00431a78
PA
252 thread_info *tp = inferior_thread ();
253
cd4007e4
MM
254 validate_registers_access ();
255
4a4495d6 256 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 257
6e07b1d2 258 if (btrace_is_empty (tp))
afedecd3
MM
259 error (_("No trace."));
260
066ce621
MM
261 return tp;
262}
263
264/* Update the branch trace for the current thread and return a pointer to its
265 branch trace information struct.
266
267 Throws an error if there is no thread or no trace. This function never
268 returns NULL. */
269
270static struct btrace_thread_info *
271require_btrace (void)
272{
273 struct thread_info *tp;
274
275 tp = require_btrace_thread ();
276
277 return &tp->btrace;
afedecd3
MM
278}
279
280/* Enable branch tracing for one thread. Warn on errors. */
281
282static void
283record_btrace_enable_warn (struct thread_info *tp)
284{
492d29ea
PA
285 TRY
286 {
287 btrace_enable (tp, &record_btrace_conf);
288 }
289 CATCH (error, RETURN_MASK_ERROR)
290 {
291 warning ("%s", error.message);
292 }
293 END_CATCH
afedecd3
MM
294}
295
afedecd3
MM
296/* Enable automatic tracing of new threads. */
297
298static void
299record_btrace_auto_enable (void)
300{
301 DEBUG ("attach thread observer");
302
76727919
TT
303 gdb::observers::new_thread.attach (record_btrace_enable_warn,
304 record_btrace_thread_observer_token);
afedecd3
MM
305}
306
307/* Disable automatic tracing of new threads. */
308
309static void
310record_btrace_auto_disable (void)
311{
afedecd3
MM
312 DEBUG ("detach thread observer");
313
76727919 314 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
315}
316
70ad5bff
MM
317/* The record-btrace async event handler function. */
318
319static void
320record_btrace_handle_async_inferior_event (gdb_client_data data)
321{
322 inferior_event_handler (INF_REG_EVENT, NULL);
323}
324
c0272db5
TW
325/* See record-btrace.h. */
326
327void
328record_btrace_push_target (void)
329{
330 const char *format;
331
332 record_btrace_auto_enable ();
333
334 push_target (&record_btrace_ops);
335
336 record_btrace_async_inferior_event_handler
337 = create_async_event_handler (record_btrace_handle_async_inferior_event,
338 NULL);
339 record_btrace_generating_corefile = 0;
340
341 format = btrace_format_short_string (record_btrace_conf.format);
76727919 342 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
343}
344
228f1508
SM
345/* Disable btrace on a set of threads on scope exit. */
346
347struct scoped_btrace_disable
348{
349 scoped_btrace_disable () = default;
350
351 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
352
353 ~scoped_btrace_disable ()
354 {
355 for (thread_info *tp : m_threads)
356 btrace_disable (tp);
357 }
358
359 void add_thread (thread_info *thread)
360 {
361 m_threads.push_front (thread);
362 }
363
364 void discard ()
365 {
366 m_threads.clear ();
367 }
368
369private:
370 std::forward_list<thread_info *> m_threads;
371};
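/* Usage sketch (see record_btrace_target_open below): each thread is added
   right after btrace was enabled for it; once the record-btrace target has
   been pushed successfully, discard () is called so the destructor leaves
   tracing enabled.  */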
372
d9f719f1 373/* Open target record-btrace. */
afedecd3 374
d9f719f1
PA
375static void
376record_btrace_target_open (const char *args, int from_tty)
afedecd3 377{
228f1508
SM
378 /* If we fail to enable btrace for one thread, disable it for the threads for
379 which it was successfully enabled. */
380 scoped_btrace_disable btrace_disable;
afedecd3
MM
381
382 DEBUG ("open");
383
8213266a 384 record_preopen ();
afedecd3
MM
385
386 if (!target_has_execution)
387 error (_("The program is not being run."));
388
08036331 389 for (thread_info *tp : all_non_exited_threads ())
5d5658a1 390 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 391 {
f4abbc16 392 btrace_enable (tp, &record_btrace_conf);
afedecd3 393
228f1508 394 btrace_disable.add_thread (tp);
afedecd3
MM
395 }
396
c0272db5 397 record_btrace_push_target ();
afedecd3 398
228f1508 399 btrace_disable.discard ();
afedecd3
MM
400}
401
f6ac5f3d 402/* The stop_recording method of target record-btrace. */
afedecd3 403
f6ac5f3d
PA
404void
405record_btrace_target::stop_recording ()
afedecd3 406{
afedecd3
MM
407 DEBUG ("stop recording");
408
409 record_btrace_auto_disable ();
410
08036331 411 for (thread_info *tp : all_non_exited_threads ())
afedecd3
MM
412 if (tp->btrace.target != NULL)
413 btrace_disable (tp);
414}
415
f6ac5f3d 416/* The disconnect method of target record-btrace. */
c0272db5 417
f6ac5f3d
PA
418void
419record_btrace_target::disconnect (const char *args,
420 int from_tty)
c0272db5 421{
b6a8c27b 422 struct target_ops *beneath = this->beneath ();
c0272db5
TW
423
424 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 425 unpush_target (this);
c0272db5
TW
426
427 /* Forward disconnect. */
f6ac5f3d 428 beneath->disconnect (args, from_tty);
c0272db5
TW
429}
430
f6ac5f3d 431/* The close method of target record-btrace. */
afedecd3 432
f6ac5f3d
PA
433void
434record_btrace_target::close ()
afedecd3 435{
70ad5bff
MM
436 if (record_btrace_async_inferior_event_handler != NULL)
437 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
438
99c819ee
MM
439 /* Make sure automatic recording gets disabled even if we did not stop
440 recording before closing the record-btrace target. */
441 record_btrace_auto_disable ();
442
568e808b
MM
443 /* We should have already stopped recording.
444 Tear down btrace in case we have not. */
08036331 445 for (thread_info *tp : all_non_exited_threads ())
568e808b 446 btrace_teardown (tp);
afedecd3
MM
447}
448
f6ac5f3d 449/* The async method of target record-btrace. */
b7d2e916 450
f6ac5f3d
PA
451void
452record_btrace_target::async (int enable)
b7d2e916 453{
6a3753b3 454 if (enable)
b7d2e916
PA
455 mark_async_event_handler (record_btrace_async_inferior_event_handler);
456 else
457 clear_async_event_handler (record_btrace_async_inferior_event_handler);
458
b6a8c27b 459 this->beneath ()->async (enable);
b7d2e916
PA
460}
461
d33501a5
MM
462/* Adjusts the size and returns a human-readable size suffix. */
463
464static const char *
465record_btrace_adjust_size (unsigned int *size)
466{
467 unsigned int sz;
468
469 sz = *size;
470
471 if ((sz & ((1u << 30) - 1)) == 0)
472 {
473 *size = sz >> 30;
474 return "GB";
475 }
476 else if ((sz & ((1u << 20) - 1)) == 0)
477 {
478 *size = sz >> 20;
479 return "MB";
480 }
481 else if ((sz & ((1u << 10) - 1)) == 0)
482 {
483 *size = sz >> 10;
484 return "kB";
485 }
486 else
487 return "";
488}
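/* For example, a 2 MiB buffer (2097152 bytes) is adjusted to 2 with suffix
   "MB", 4096 becomes 4 with suffix "kB", and a size that is not a multiple
   of 1 kiB is left unchanged with an empty suffix.  */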
489
490/* Print a BTS configuration. */
491
492static void
493record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
494{
495 const char *suffix;
496 unsigned int size;
497
498 size = conf->size;
499 if (size > 0)
500 {
501 suffix = record_btrace_adjust_size (&size);
502 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
503 }
504}
505
bc504a31 506/* Print an Intel Processor Trace configuration. */
b20a6524
MM
507
508static void
509record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
510{
511 const char *suffix;
512 unsigned int size;
513
514 size = conf->size;
515 if (size > 0)
516 {
517 suffix = record_btrace_adjust_size (&size);
518 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
519 }
520}
521
d33501a5
MM
522/* Print a branch tracing configuration. */
523
524static void
525record_btrace_print_conf (const struct btrace_config *conf)
526{
527 printf_unfiltered (_("Recording format: %s.\n"),
528 btrace_format_string (conf->format));
529
530 switch (conf->format)
531 {
532 case BTRACE_FORMAT_NONE:
533 return;
534
535 case BTRACE_FORMAT_BTS:
536 record_btrace_print_bts_conf (&conf->bts);
537 return;
b20a6524
MM
538
539 case BTRACE_FORMAT_PT:
540 record_btrace_print_pt_conf (&conf->pt);
541 return;
d33501a5
MM
542 }
543
 544  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
545}
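/* For a BTS configuration with a 64 KiB buffer this prints, e.g.:

     Recording format: <format name>.
     Buffer size: 64kB.  */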
546
f6ac5f3d 547/* The info_record method of target record-btrace. */
afedecd3 548
f6ac5f3d
PA
549void
550record_btrace_target::info_record ()
afedecd3
MM
551{
552 struct btrace_thread_info *btinfo;
f4abbc16 553 const struct btrace_config *conf;
afedecd3 554 struct thread_info *tp;
31fd9caa 555 unsigned int insns, calls, gaps;
afedecd3
MM
556
557 DEBUG ("info");
558
559 tp = find_thread_ptid (inferior_ptid);
560 if (tp == NULL)
561 error (_("No thread."));
562
cd4007e4
MM
563 validate_registers_access ();
564
f4abbc16
MM
565 btinfo = &tp->btrace;
566
f6ac5f3d 567 conf = ::btrace_conf (btinfo);
f4abbc16 568 if (conf != NULL)
d33501a5 569 record_btrace_print_conf (conf);
f4abbc16 570
4a4495d6 571 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 572
23a7fe75
MM
573 insns = 0;
574 calls = 0;
31fd9caa 575 gaps = 0;
23a7fe75 576
6e07b1d2 577 if (!btrace_is_empty (tp))
23a7fe75
MM
578 {
579 struct btrace_call_iterator call;
580 struct btrace_insn_iterator insn;
581
582 btrace_call_end (&call, btinfo);
583 btrace_call_prev (&call, 1);
5de9129b 584 calls = btrace_call_number (&call);
23a7fe75
MM
585
586 btrace_insn_end (&insn, btinfo);
5de9129b 587 insns = btrace_insn_number (&insn);
31fd9caa 588
69090cee
TW
589 /* If the last instruction is not a gap, it is the current instruction
590 that is not actually part of the record. */
591 if (btrace_insn_get (&insn) != NULL)
592 insns -= 1;
31fd9caa
MM
593
594 gaps = btinfo->ngaps;
23a7fe75 595 }
afedecd3 596
31fd9caa 597 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
598 "for thread %s (%s).\n"), insns, calls, gaps,
599 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
600
601 if (btrace_is_replaying (tp))
602 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
603 btrace_insn_number (btinfo->replay));
afedecd3
MM
604}
605
31fd9caa
MM
606/* Print a decode error. */
607
608static void
609btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
610 enum btrace_format format)
611{
508352a9 612 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 613
112e8700 614 uiout->text (_("["));
508352a9
TW
615 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
616 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 617 {
112e8700
SM
618 uiout->text (_("decode error ("));
619 uiout->field_int ("errcode", errcode);
620 uiout->text (_("): "));
31fd9caa 621 }
112e8700
SM
622 uiout->text (errstr);
623 uiout->text (_("]\n"));
31fd9caa
MM
624}
625
afedecd3
MM
626/* Print an unsigned int. */
627
628static void
629ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
630{
112e8700 631 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
632}
633
f94cc897
MM
634/* A range of source lines. */
635
636struct btrace_line_range
637{
638 /* The symtab this line is from. */
639 struct symtab *symtab;
640
641 /* The first line (inclusive). */
642 int begin;
643
644 /* The last line (exclusive). */
645 int end;
646};
647
648/* Construct a line range. */
649
650static struct btrace_line_range
651btrace_mk_line_range (struct symtab *symtab, int begin, int end)
652{
653 struct btrace_line_range range;
654
655 range.symtab = symtab;
656 range.begin = begin;
657 range.end = end;
658
659 return range;
660}
661
662/* Add a line to a line range. */
663
664static struct btrace_line_range
665btrace_line_range_add (struct btrace_line_range range, int line)
666{
667 if (range.end <= range.begin)
668 {
669 /* This is the first entry. */
670 range.begin = line;
671 range.end = line + 1;
672 }
673 else if (line < range.begin)
674 range.begin = line;
675 else if (range.end < line)
676 range.end = line;
677
678 return range;
679}
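/* For example, adding line 42 to an empty range yields [42; 43); adding a
   smaller line number afterwards only lowers BEGIN.  */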
680
681/* Return non-zero if RANGE is empty, zero otherwise. */
682
683static int
684btrace_line_range_is_empty (struct btrace_line_range range)
685{
686 return range.end <= range.begin;
687}
688
689/* Return non-zero if LHS contains RHS, zero otherwise. */
690
691static int
692btrace_line_range_contains_range (struct btrace_line_range lhs,
693 struct btrace_line_range rhs)
694{
695 return ((lhs.symtab == rhs.symtab)
696 && (lhs.begin <= rhs.begin)
697 && (rhs.end <= lhs.end));
698}
699
700/* Find the line range associated with PC. */
701
702static struct btrace_line_range
703btrace_find_line_range (CORE_ADDR pc)
704{
705 struct btrace_line_range range;
706 struct linetable_entry *lines;
707 struct linetable *ltable;
708 struct symtab *symtab;
709 int nlines, i;
710
711 symtab = find_pc_line_symtab (pc);
712 if (symtab == NULL)
713 return btrace_mk_line_range (NULL, 0, 0);
714
715 ltable = SYMTAB_LINETABLE (symtab);
716 if (ltable == NULL)
717 return btrace_mk_line_range (symtab, 0, 0);
718
719 nlines = ltable->nitems;
720 lines = ltable->item;
721 if (nlines <= 0)
722 return btrace_mk_line_range (symtab, 0, 0);
723
724 range = btrace_mk_line_range (symtab, 0, 0);
725 for (i = 0; i < nlines - 1; i++)
726 {
727 if ((lines[i].pc == pc) && (lines[i].line != 0))
728 range = btrace_line_range_add (range, lines[i].line);
729 }
730
731 return range;
732}
733
 734/* Print source lines in LINES to UIOUT.
 735
 736   SRC_AND_ASM_TUPLE and ASM_LIST are the tuple and list emitters for the last
 737   source line and the instructions corresponding to that source line.  When
 738   printing a new source line, we close the open emitters and open new ones for
 739   the new source line.  If the source line range in LINES is not empty, this
 740   function will leave the emitters for the last printed source line open so
 741   instructions can be added to them.  */
742
743static void
744btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
745 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
746 gdb::optional<ui_out_emit_list> *asm_list,
747 gdb_disassembly_flags flags)
f94cc897 748{
8d297bbf 749 print_source_lines_flags psl_flags;
f94cc897 750
f94cc897
MM
751 if (flags & DISASSEMBLY_FILENAME)
752 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
753
7ea78b59 754 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 755 {
7ea78b59 756 asm_list->reset ();
f94cc897 757
7ea78b59 758 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
759
760 print_source_lines (lines.symtab, line, line + 1, psl_flags);
761
7ea78b59 762 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
763 }
764}
765
afedecd3
MM
766/* Disassemble a section of the recorded instruction trace. */
767
768static void
23a7fe75 769btrace_insn_history (struct ui_out *uiout,
31fd9caa 770 const struct btrace_thread_info *btinfo,
23a7fe75 771 const struct btrace_insn_iterator *begin,
9a24775b
PA
772 const struct btrace_insn_iterator *end,
773 gdb_disassembly_flags flags)
afedecd3 774{
9a24775b
PA
775 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
776 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 777
f94cc897
MM
778 flags |= DISASSEMBLY_SPECULATIVE;
779
7ea78b59
SM
780 struct gdbarch *gdbarch = target_gdbarch ();
781 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 782
7ea78b59 783 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 784
7ea78b59
SM
785 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
786 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 787
8b172ce7
PA
788 gdb_pretty_print_disassembler disasm (gdbarch);
789
7ea78b59
SM
790 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
791 btrace_insn_next (&it, 1))
afedecd3 792 {
23a7fe75
MM
793 const struct btrace_insn *insn;
794
795 insn = btrace_insn_get (&it);
796
31fd9caa
MM
797 /* A NULL instruction indicates a gap in the trace. */
798 if (insn == NULL)
799 {
800 const struct btrace_config *conf;
801
802 conf = btrace_conf (btinfo);
afedecd3 803
31fd9caa
MM
804 /* We have trace so we must have a configuration. */
805 gdb_assert (conf != NULL);
806
69090cee
TW
807 uiout->field_fmt ("insn-number", "%u",
808 btrace_insn_number (&it));
809 uiout->text ("\t");
810
811 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
812 conf->format);
813 }
814 else
815 {
f94cc897 816 struct disasm_insn dinsn;
da8c46d2 817
f94cc897 818 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 819 {
f94cc897
MM
820 struct btrace_line_range lines;
821
822 lines = btrace_find_line_range (insn->pc);
823 if (!btrace_line_range_is_empty (lines)
824 && !btrace_line_range_contains_range (last_lines, lines))
825 {
7ea78b59
SM
826 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
827 flags);
f94cc897
MM
828 last_lines = lines;
829 }
7ea78b59 830 else if (!src_and_asm_tuple.has_value ())
f94cc897 831 {
7ea78b59
SM
832 gdb_assert (!asm_list.has_value ());
833
834 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
835
f94cc897 836 /* No source information. */
7ea78b59 837 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
838 }
839
7ea78b59
SM
840 gdb_assert (src_and_asm_tuple.has_value ());
841 gdb_assert (asm_list.has_value ());
da8c46d2 842 }
da8c46d2 843
f94cc897
MM
844 memset (&dinsn, 0, sizeof (dinsn));
845 dinsn.number = btrace_insn_number (&it);
846 dinsn.addr = insn->pc;
31fd9caa 847
da8c46d2 848 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 849 dinsn.is_speculative = 1;
da8c46d2 850
8b172ce7 851 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 852 }
afedecd3
MM
853 }
854}
855
f6ac5f3d 856/* The insn_history method of target record-btrace. */
afedecd3 857
f6ac5f3d
PA
858void
859record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
860{
861 struct btrace_thread_info *btinfo;
23a7fe75
MM
862 struct btrace_insn_history *history;
863 struct btrace_insn_iterator begin, end;
afedecd3 864 struct ui_out *uiout;
23a7fe75 865 unsigned int context, covered;
afedecd3
MM
866
867 uiout = current_uiout;
2e783024 868 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 869 context = abs (size);
afedecd3
MM
870 if (context == 0)
871 error (_("Bad record instruction-history-size."));
872
23a7fe75
MM
873 btinfo = require_btrace ();
874 history = btinfo->insn_history;
875 if (history == NULL)
afedecd3 876 {
07bbe694 877 struct btrace_insn_iterator *replay;
afedecd3 878
9a24775b 879 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 880
07bbe694
MM
881 /* If we're replaying, we start at the replay position. Otherwise, we
882 start at the tail of the trace. */
883 replay = btinfo->replay;
884 if (replay != NULL)
885 begin = *replay;
886 else
887 btrace_insn_end (&begin, btinfo);
888
889 /* We start from here and expand in the requested direction. Then we
890 expand in the other direction, as well, to fill up any remaining
891 context. */
892 end = begin;
893 if (size < 0)
894 {
895 /* We want the current position covered, as well. */
896 covered = btrace_insn_next (&end, 1);
897 covered += btrace_insn_prev (&begin, context - covered);
898 covered += btrace_insn_next (&end, context - covered);
899 }
900 else
901 {
902 covered = btrace_insn_next (&end, context);
903 covered += btrace_insn_prev (&begin, context - covered);
904 }
afedecd3
MM
905 }
906 else
907 {
23a7fe75
MM
908 begin = history->begin;
909 end = history->end;
afedecd3 910
9a24775b 911 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 912 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 913
23a7fe75
MM
914 if (size < 0)
915 {
916 end = begin;
917 covered = btrace_insn_prev (&begin, context);
918 }
919 else
920 {
921 begin = end;
922 covered = btrace_insn_next (&end, context);
923 }
afedecd3
MM
924 }
925
23a7fe75 926 if (covered > 0)
31fd9caa 927 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
928 else
929 {
930 if (size < 0)
931 printf_unfiltered (_("At the start of the branch trace record.\n"));
932 else
933 printf_unfiltered (_("At the end of the branch trace record.\n"));
934 }
afedecd3 935
23a7fe75 936 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
937}
938
f6ac5f3d 939/* The insn_history_range method of target record-btrace. */
afedecd3 940
f6ac5f3d
PA
941void
942record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
943 gdb_disassembly_flags flags)
afedecd3
MM
944{
945 struct btrace_thread_info *btinfo;
23a7fe75 946 struct btrace_insn_iterator begin, end;
afedecd3 947 struct ui_out *uiout;
23a7fe75
MM
948 unsigned int low, high;
949 int found;
afedecd3
MM
950
951 uiout = current_uiout;
2e783024 952 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
953 low = from;
954 high = to;
afedecd3 955
9a24775b 956 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
957
958 /* Check for wrap-arounds. */
23a7fe75 959 if (low != from || high != to)
afedecd3
MM
960 error (_("Bad range."));
961
0688d04e 962 if (high < low)
afedecd3
MM
963 error (_("Bad range."));
964
23a7fe75 965 btinfo = require_btrace ();
afedecd3 966
23a7fe75
MM
967 found = btrace_find_insn_by_number (&begin, btinfo, low);
968 if (found == 0)
969 error (_("Range out of bounds."));
afedecd3 970
23a7fe75
MM
971 found = btrace_find_insn_by_number (&end, btinfo, high);
972 if (found == 0)
0688d04e
MM
973 {
974 /* Silently truncate the range. */
975 btrace_insn_end (&end, btinfo);
976 }
977 else
978 {
979 /* We want both begin and end to be inclusive. */
980 btrace_insn_next (&end, 1);
981 }
afedecd3 982
31fd9caa 983 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 984 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
985}
986
f6ac5f3d 987/* The insn_history_from method of target record-btrace. */
afedecd3 988
f6ac5f3d
PA
989void
990record_btrace_target::insn_history_from (ULONGEST from, int size,
991 gdb_disassembly_flags flags)
afedecd3
MM
992{
993 ULONGEST begin, end, context;
994
995 context = abs (size);
0688d04e
MM
996 if (context == 0)
997 error (_("Bad record instruction-history-size."));
afedecd3
MM
998
999 if (size < 0)
1000 {
1001 end = from;
1002
1003 if (from < context)
1004 begin = 0;
1005 else
0688d04e 1006 begin = from - context + 1;
afedecd3
MM
1007 }
1008 else
1009 {
1010 begin = from;
0688d04e 1011 end = from + context - 1;
afedecd3
MM
1012
1013 /* Check for wrap-around. */
1014 if (end < begin)
1015 end = ULONGEST_MAX;
1016 }
1017
f6ac5f3d 1018 insn_history_range (begin, end, flags);
afedecd3
MM
1019}
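/* For example, FROM = 10 with SIZE = -5 requests the inclusive range
   [6; 10], while SIZE = +5 requests [10; 14].  */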
1020
1021/* Print the instruction number range for a function call history line. */
1022
1023static void
23a7fe75
MM
1024btrace_call_history_insn_range (struct ui_out *uiout,
1025 const struct btrace_function *bfun)
afedecd3 1026{
7acbe133
MM
1027 unsigned int begin, end, size;
1028
0860c437 1029 size = bfun->insn.size ();
7acbe133 1030 gdb_assert (size > 0);
afedecd3 1031
23a7fe75 1032 begin = bfun->insn_offset;
7acbe133 1033 end = begin + size - 1;
afedecd3 1034
23a7fe75 1035 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 1036 uiout->text (",");
23a7fe75 1037 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
1038}
1039
ce0dfbea
MM
1040/* Compute the lowest and highest source line for the instructions in BFUN
1041 and return them in PBEGIN and PEND.
1042 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1043 result from inlining or macro expansion. */
1044
1045static void
1046btrace_compute_src_line_range (const struct btrace_function *bfun,
1047 int *pbegin, int *pend)
1048{
ce0dfbea
MM
1049 struct symtab *symtab;
1050 struct symbol *sym;
ce0dfbea
MM
1051 int begin, end;
1052
1053 begin = INT_MAX;
1054 end = INT_MIN;
1055
1056 sym = bfun->sym;
1057 if (sym == NULL)
1058 goto out;
1059
1060 symtab = symbol_symtab (sym);
1061
0860c437 1062 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1063 {
1064 struct symtab_and_line sal;
1065
0860c437 1066 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1067 if (sal.symtab != symtab || sal.line == 0)
1068 continue;
1069
325fac50
PA
1070 begin = std::min (begin, sal.line);
1071 end = std::max (end, sal.line);
ce0dfbea
MM
1072 }
1073
1074 out:
1075 *pbegin = begin;
1076 *pend = end;
1077}
1078
afedecd3
MM
1079/* Print the source line information for a function call history line. */
1080
1081static void
23a7fe75
MM
1082btrace_call_history_src_line (struct ui_out *uiout,
1083 const struct btrace_function *bfun)
afedecd3
MM
1084{
1085 struct symbol *sym;
23a7fe75 1086 int begin, end;
afedecd3
MM
1087
1088 sym = bfun->sym;
1089 if (sym == NULL)
1090 return;
1091
112e8700 1092 uiout->field_string ("file",
cbe56571
TT
1093 symtab_to_filename_for_display (symbol_symtab (sym)),
1094 ui_out_style_kind::FILE);
afedecd3 1095
ce0dfbea 1096 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1097 if (end < begin)
afedecd3
MM
1098 return;
1099
112e8700
SM
1100 uiout->text (":");
1101 uiout->field_int ("min line", begin);
afedecd3 1102
23a7fe75 1103 if (end == begin)
afedecd3
MM
1104 return;
1105
112e8700
SM
1106 uiout->text (",");
1107 uiout->field_int ("max line", end);
afedecd3
MM
1108}
1109
0b722aec
MM
1110/* Get the name of a branch trace function. */
1111
1112static const char *
1113btrace_get_bfun_name (const struct btrace_function *bfun)
1114{
1115 struct minimal_symbol *msym;
1116 struct symbol *sym;
1117
1118 if (bfun == NULL)
1119 return "??";
1120
1121 msym = bfun->msym;
1122 sym = bfun->sym;
1123
1124 if (sym != NULL)
1125 return SYMBOL_PRINT_NAME (sym);
1126 else if (msym != NULL)
efd66ac6 1127 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1128 else
1129 return "??";
1130}
1131
afedecd3
MM
1132/* Disassemble a section of the recorded function trace. */
1133
1134static void
23a7fe75 1135btrace_call_history (struct ui_out *uiout,
8710b709 1136 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1137 const struct btrace_call_iterator *begin,
1138 const struct btrace_call_iterator *end,
8d297bbf 1139 int int_flags)
afedecd3 1140{
23a7fe75 1141 struct btrace_call_iterator it;
8d297bbf 1142 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1143
8d297bbf 1144 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1145 btrace_call_number (end));
afedecd3 1146
23a7fe75 1147 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1148 {
23a7fe75
MM
1149 const struct btrace_function *bfun;
1150 struct minimal_symbol *msym;
1151 struct symbol *sym;
1152
1153 bfun = btrace_call_get (&it);
23a7fe75 1154 sym = bfun->sym;
0b722aec 1155 msym = bfun->msym;
23a7fe75 1156
afedecd3 1157 /* Print the function index. */
23a7fe75 1158 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1159 uiout->text ("\t");
afedecd3 1160
31fd9caa
MM
1161 /* Indicate gaps in the trace. */
1162 if (bfun->errcode != 0)
1163 {
1164 const struct btrace_config *conf;
1165
1166 conf = btrace_conf (btinfo);
1167
1168 /* We have trace so we must have a configuration. */
1169 gdb_assert (conf != NULL);
1170
1171 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1172
1173 continue;
1174 }
1175
8710b709
MM
1176 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1177 {
1178 int level = bfun->level + btinfo->level, i;
1179
1180 for (i = 0; i < level; ++i)
112e8700 1181 uiout->text (" ");
8710b709
MM
1182 }
1183
1184 if (sym != NULL)
cbe56571
TT
1185 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
1186 ui_out_style_kind::FUNCTION);
8710b709 1187 else if (msym != NULL)
cbe56571
TT
1188 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
1189 ui_out_style_kind::FUNCTION);
112e8700 1190 else if (!uiout->is_mi_like_p ())
cbe56571
TT
1191 uiout->field_string ("function", "??",
1192 ui_out_style_kind::FUNCTION);
8710b709 1193
1e038f67 1194 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1195 {
112e8700 1196 uiout->text (_("\tinst "));
23a7fe75 1197 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1198 }
1199
1e038f67 1200 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1201 {
112e8700 1202 uiout->text (_("\tat "));
23a7fe75 1203 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1204 }
1205
112e8700 1206 uiout->text ("\n");
afedecd3
MM
1207 }
1208}
1209
f6ac5f3d 1210/* The call_history method of target record-btrace. */
afedecd3 1211
f6ac5f3d
PA
1212void
1213record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1214{
1215 struct btrace_thread_info *btinfo;
23a7fe75
MM
1216 struct btrace_call_history *history;
1217 struct btrace_call_iterator begin, end;
afedecd3 1218 struct ui_out *uiout;
23a7fe75 1219 unsigned int context, covered;
afedecd3
MM
1220
1221 uiout = current_uiout;
2e783024 1222 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1223 context = abs (size);
afedecd3
MM
1224 if (context == 0)
1225 error (_("Bad record function-call-history-size."));
1226
23a7fe75
MM
1227 btinfo = require_btrace ();
1228 history = btinfo->call_history;
1229 if (history == NULL)
afedecd3 1230 {
07bbe694 1231 struct btrace_insn_iterator *replay;
afedecd3 1232
0cb7c7b0 1233 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1234
07bbe694
MM
1235 /* If we're replaying, we start at the replay position. Otherwise, we
1236 start at the tail of the trace. */
1237 replay = btinfo->replay;
1238 if (replay != NULL)
1239 {
07bbe694 1240 begin.btinfo = btinfo;
a0f1b963 1241 begin.index = replay->call_index;
07bbe694
MM
1242 }
1243 else
1244 btrace_call_end (&begin, btinfo);
1245
1246 /* We start from here and expand in the requested direction. Then we
1247 expand in the other direction, as well, to fill up any remaining
1248 context. */
1249 end = begin;
1250 if (size < 0)
1251 {
1252 /* We want the current position covered, as well. */
1253 covered = btrace_call_next (&end, 1);
1254 covered += btrace_call_prev (&begin, context - covered);
1255 covered += btrace_call_next (&end, context - covered);
1256 }
1257 else
1258 {
1259 covered = btrace_call_next (&end, context);
 1260	  covered += btrace_call_prev (&begin, context - covered);
1261 }
afedecd3
MM
1262 }
1263 else
1264 {
23a7fe75
MM
1265 begin = history->begin;
1266 end = history->end;
afedecd3 1267
0cb7c7b0 1268 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1269 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1270
23a7fe75
MM
1271 if (size < 0)
1272 {
1273 end = begin;
1274 covered = btrace_call_prev (&begin, context);
1275 }
1276 else
1277 {
1278 begin = end;
1279 covered = btrace_call_next (&end, context);
1280 }
afedecd3
MM
1281 }
1282
23a7fe75 1283 if (covered > 0)
8710b709 1284 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1285 else
1286 {
1287 if (size < 0)
1288 printf_unfiltered (_("At the start of the branch trace record.\n"));
1289 else
1290 printf_unfiltered (_("At the end of the branch trace record.\n"));
1291 }
afedecd3 1292
23a7fe75 1293 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1294}
1295
f6ac5f3d 1296/* The call_history_range method of target record-btrace. */
afedecd3 1297
f6ac5f3d
PA
1298void
1299record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1300 record_print_flags flags)
afedecd3
MM
1301{
1302 struct btrace_thread_info *btinfo;
23a7fe75 1303 struct btrace_call_iterator begin, end;
afedecd3 1304 struct ui_out *uiout;
23a7fe75
MM
1305 unsigned int low, high;
1306 int found;
afedecd3
MM
1307
1308 uiout = current_uiout;
2e783024 1309 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1310 low = from;
1311 high = to;
afedecd3 1312
0cb7c7b0 1313 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1314
1315 /* Check for wrap-arounds. */
23a7fe75 1316 if (low != from || high != to)
afedecd3
MM
1317 error (_("Bad range."));
1318
0688d04e 1319 if (high < low)
afedecd3
MM
1320 error (_("Bad range."));
1321
23a7fe75 1322 btinfo = require_btrace ();
afedecd3 1323
23a7fe75
MM
1324 found = btrace_find_call_by_number (&begin, btinfo, low);
1325 if (found == 0)
1326 error (_("Range out of bounds."));
afedecd3 1327
23a7fe75
MM
1328 found = btrace_find_call_by_number (&end, btinfo, high);
1329 if (found == 0)
0688d04e
MM
1330 {
1331 /* Silently truncate the range. */
1332 btrace_call_end (&end, btinfo);
1333 }
1334 else
1335 {
1336 /* We want both begin and end to be inclusive. */
1337 btrace_call_next (&end, 1);
1338 }
afedecd3 1339
8710b709 1340 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1341 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1342}
1343
f6ac5f3d 1344/* The call_history_from method of target record-btrace. */
afedecd3 1345
f6ac5f3d
PA
1346void
1347record_btrace_target::call_history_from (ULONGEST from, int size,
1348 record_print_flags flags)
afedecd3
MM
1349{
1350 ULONGEST begin, end, context;
1351
1352 context = abs (size);
0688d04e
MM
1353 if (context == 0)
1354 error (_("Bad record function-call-history-size."));
afedecd3
MM
1355
1356 if (size < 0)
1357 {
1358 end = from;
1359
1360 if (from < context)
1361 begin = 0;
1362 else
0688d04e 1363 begin = from - context + 1;
afedecd3
MM
1364 }
1365 else
1366 {
1367 begin = from;
0688d04e 1368 end = from + context - 1;
afedecd3
MM
1369
1370 /* Check for wrap-around. */
1371 if (end < begin)
1372 end = ULONGEST_MAX;
1373 }
1374
f6ac5f3d 1375  call_history_range (begin, end, flags);
afedecd3
MM
1376}
1377
f6ac5f3d 1378/* The record_method method of target record-btrace. */
b158a20f 1379
f6ac5f3d
PA
1380enum record_method
1381record_btrace_target::record_method (ptid_t ptid)
b158a20f 1382{
b158a20f
TW
1383 struct thread_info * const tp = find_thread_ptid (ptid);
1384
1385 if (tp == NULL)
1386 error (_("No thread."));
1387
1388 if (tp->btrace.target == NULL)
1389 return RECORD_METHOD_NONE;
1390
1391 return RECORD_METHOD_BTRACE;
1392}
1393
f6ac5f3d 1394/* The record_is_replaying method of target record-btrace. */
07bbe694 1395
57810aa7 1396bool
f6ac5f3d 1397record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1398{
08036331
PA
1399 for (thread_info *tp : all_non_exited_threads (ptid))
1400 if (btrace_is_replaying (tp))
57810aa7 1401 return true;
07bbe694 1402
57810aa7 1403 return false;
07bbe694
MM
1404}
1405
f6ac5f3d 1406/* The record_will_replay method of target record-btrace. */
7ff27e9b 1407
57810aa7 1408bool
f6ac5f3d 1409record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1410{
f6ac5f3d 1411 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1412}
1413
f6ac5f3d 1414/* The xfer_partial method of target record-btrace. */
633785ff 1415
f6ac5f3d
PA
1416enum target_xfer_status
1417record_btrace_target::xfer_partial (enum target_object object,
1418 const char *annex, gdb_byte *readbuf,
1419 const gdb_byte *writebuf, ULONGEST offset,
1420 ULONGEST len, ULONGEST *xfered_len)
633785ff 1421{
633785ff 1422 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1423 if (replay_memory_access == replay_memory_access_read_only
aef92902 1424 && !record_btrace_generating_corefile
f6ac5f3d 1425 && record_is_replaying (inferior_ptid))
633785ff
MM
1426 {
1427 switch (object)
1428 {
1429 case TARGET_OBJECT_MEMORY:
1430 {
1431 struct target_section *section;
1432
1433 /* We do not allow writing memory in general. */
1434 if (writebuf != NULL)
9b409511
YQ
1435 {
1436 *xfered_len = len;
bc113b4e 1437 return TARGET_XFER_UNAVAILABLE;
9b409511 1438 }
633785ff
MM
1439
1440 /* We allow reading readonly memory. */
f6ac5f3d 1441 section = target_section_by_addr (this, offset);
633785ff
MM
1442 if (section != NULL)
1443 {
1444 /* Check if the section we found is readonly. */
1445 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1446 section->the_bfd_section)
1447 & SEC_READONLY) != 0)
1448 {
1449 /* Truncate the request to fit into this section. */
325fac50 1450 len = std::min (len, section->endaddr - offset);
633785ff
MM
1451 break;
1452 }
1453 }
1454
9b409511 1455 *xfered_len = len;
bc113b4e 1456 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1457 }
1458 }
1459 }
1460
1461 /* Forward the request. */
b6a8c27b
PA
1462 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1463 offset, len, xfered_len);
633785ff
MM
1464}
1465
f6ac5f3d 1466/* The insert_breakpoint method of target record-btrace. */
633785ff 1467
f6ac5f3d
PA
1468int
1469record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1470 struct bp_target_info *bp_tgt)
633785ff 1471{
67b5c0c1
MM
1472 const char *old;
1473 int ret;
633785ff
MM
1474
1475 /* Inserting breakpoints requires accessing memory. Allow it for the
1476 duration of this function. */
67b5c0c1
MM
1477 old = replay_memory_access;
1478 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1479
1480 ret = 0;
492d29ea
PA
1481 TRY
1482 {
b6a8c27b 1483 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1484 }
492d29ea
PA
1485 CATCH (except, RETURN_MASK_ALL)
1486 {
6c63c96a 1487 replay_memory_access = old;
492d29ea
PA
1488 throw_exception (except);
1489 }
1490 END_CATCH
6c63c96a 1491 replay_memory_access = old;
633785ff
MM
1492
1493 return ret;
1494}
1495
f6ac5f3d 1496/* The remove_breakpoint method of target record-btrace. */
633785ff 1497
f6ac5f3d
PA
1498int
1499record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1500 struct bp_target_info *bp_tgt,
1501 enum remove_bp_reason reason)
633785ff 1502{
67b5c0c1
MM
1503 const char *old;
1504 int ret;
633785ff
MM
1505
1506 /* Removing breakpoints requires accessing memory. Allow it for the
1507 duration of this function. */
67b5c0c1
MM
1508 old = replay_memory_access;
1509 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1510
1511 ret = 0;
492d29ea
PA
1512 TRY
1513 {
b6a8c27b 1514 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1515 }
492d29ea
PA
1516 CATCH (except, RETURN_MASK_ALL)
1517 {
6c63c96a 1518 replay_memory_access = old;
492d29ea
PA
1519 throw_exception (except);
1520 }
1521 END_CATCH
6c63c96a 1522 replay_memory_access = old;
633785ff
MM
1523
1524 return ret;
1525}
1526
f6ac5f3d 1527/* The fetch_registers method of target record-btrace. */
1f3ef581 1528
f6ac5f3d
PA
1529void
1530record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1531{
1532 struct btrace_insn_iterator *replay;
1533 struct thread_info *tp;
1534
222312d3 1535 tp = find_thread_ptid (regcache->ptid ());
1f3ef581
MM
1536 gdb_assert (tp != NULL);
1537
1538 replay = tp->btrace.replay;
aef92902 1539 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1540 {
1541 const struct btrace_insn *insn;
1542 struct gdbarch *gdbarch;
1543 int pcreg;
1544
ac7936df 1545 gdbarch = regcache->arch ();
1f3ef581
MM
1546 pcreg = gdbarch_pc_regnum (gdbarch);
1547 if (pcreg < 0)
1548 return;
1549
1550 /* We can only provide the PC register. */
1551 if (regno >= 0 && regno != pcreg)
1552 return;
1553
1554 insn = btrace_insn_get (replay);
1555 gdb_assert (insn != NULL);
1556
73e1c03f 1557 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1558 }
1559 else
b6a8c27b 1560 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1561}
1562
f6ac5f3d 1563/* The store_registers method of target record-btrace. */
1f3ef581 1564
f6ac5f3d
PA
1565void
1566record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1567{
a52eab48 1568 if (!record_btrace_generating_corefile
222312d3 1569 && record_is_replaying (regcache->ptid ()))
4d10e986 1570 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1571
1572 gdb_assert (may_write_registers != 0);
1573
b6a8c27b 1574 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1575}
1576
f6ac5f3d 1577/* The prepare_to_store method of target record-btrace. */
1f3ef581 1578
f6ac5f3d
PA
1579void
1580record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1581{
a52eab48 1582 if (!record_btrace_generating_corefile
222312d3 1583 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1584 return;
1585
b6a8c27b 1586 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1587}
1588
0b722aec
MM
1589/* The branch trace frame cache. */
1590
1591struct btrace_frame_cache
1592{
1593 /* The thread. */
1594 struct thread_info *tp;
1595
1596 /* The frame info. */
1597 struct frame_info *frame;
1598
1599 /* The branch trace function segment. */
1600 const struct btrace_function *bfun;
1601};
1602
1603/* A struct btrace_frame_cache hash table indexed by NEXT. */
1604
1605static htab_t bfcache;
1606
1607/* hash_f for htab_create_alloc of bfcache. */
1608
1609static hashval_t
1610bfcache_hash (const void *arg)
1611{
19ba03f4
SM
1612 const struct btrace_frame_cache *cache
1613 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1614
1615 return htab_hash_pointer (cache->frame);
1616}
1617
1618/* eq_f for htab_create_alloc of bfcache. */
1619
1620static int
1621bfcache_eq (const void *arg1, const void *arg2)
1622{
19ba03f4
SM
1623 const struct btrace_frame_cache *cache1
1624 = (const struct btrace_frame_cache *) arg1;
1625 const struct btrace_frame_cache *cache2
1626 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1627
1628 return cache1->frame == cache2->frame;
1629}
1630
1631/* Create a new btrace frame cache. */
1632
1633static struct btrace_frame_cache *
1634bfcache_new (struct frame_info *frame)
1635{
1636 struct btrace_frame_cache *cache;
1637 void **slot;
1638
1639 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1640 cache->frame = frame;
1641
1642 slot = htab_find_slot (bfcache, cache, INSERT);
1643 gdb_assert (*slot == NULL);
1644 *slot = cache;
1645
1646 return cache;
1647}
1648
1649/* Extract the branch trace function from a branch trace frame. */
1650
1651static const struct btrace_function *
1652btrace_get_frame_function (struct frame_info *frame)
1653{
1654 const struct btrace_frame_cache *cache;
0b722aec
MM
1655 struct btrace_frame_cache pattern;
1656 void **slot;
1657
1658 pattern.frame = frame;
1659
1660 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1661 if (slot == NULL)
1662 return NULL;
1663
19ba03f4 1664 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1665 return cache->bfun;
1666}
1667
cecac1ab
MM
1668/* Implement stop_reason method for record_btrace_frame_unwind. */
1669
1670static enum unwind_stop_reason
1671record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1672 void **this_cache)
1673{
0b722aec
MM
1674 const struct btrace_frame_cache *cache;
1675 const struct btrace_function *bfun;
1676
19ba03f4 1677 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1678 bfun = cache->bfun;
1679 gdb_assert (bfun != NULL);
1680
42bfe59e 1681 if (bfun->up == 0)
0b722aec
MM
1682 return UNWIND_UNAVAILABLE;
1683
1684 return UNWIND_NO_REASON;
cecac1ab
MM
1685}
1686
1687/* Implement this_id method for record_btrace_frame_unwind. */
1688
1689static void
1690record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1691 struct frame_id *this_id)
1692{
0b722aec
MM
1693 const struct btrace_frame_cache *cache;
1694 const struct btrace_function *bfun;
4aeb0dfc 1695 struct btrace_call_iterator it;
0b722aec
MM
1696 CORE_ADDR code, special;
1697
19ba03f4 1698 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1699
1700 bfun = cache->bfun;
1701 gdb_assert (bfun != NULL);
1702
4aeb0dfc
TW
1703 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1704 bfun = btrace_call_get (&it);
0b722aec
MM
1705
1706 code = get_frame_func (this_frame);
1707 special = bfun->number;
1708
1709 *this_id = frame_id_build_unavailable_stack_special (code, special);
1710
1711 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1712 btrace_get_bfun_name (cache->bfun),
1713 core_addr_to_string_nz (this_id->code_addr),
1714 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1715}
1716
1717/* Implement prev_register method for record_btrace_frame_unwind. */
1718
1719static struct value *
1720record_btrace_frame_prev_register (struct frame_info *this_frame,
1721 void **this_cache,
1722 int regnum)
1723{
0b722aec
MM
1724 const struct btrace_frame_cache *cache;
1725 const struct btrace_function *bfun, *caller;
42bfe59e 1726 struct btrace_call_iterator it;
0b722aec
MM
1727 struct gdbarch *gdbarch;
1728 CORE_ADDR pc;
1729 int pcreg;
1730
1731 gdbarch = get_frame_arch (this_frame);
1732 pcreg = gdbarch_pc_regnum (gdbarch);
1733 if (pcreg < 0 || regnum != pcreg)
1734 throw_error (NOT_AVAILABLE_ERROR,
1735 _("Registers are not available in btrace record history"));
1736
19ba03f4 1737 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1738 bfun = cache->bfun;
1739 gdb_assert (bfun != NULL);
1740
42bfe59e 1741 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1742 throw_error (NOT_AVAILABLE_ERROR,
1743 _("No caller in btrace record history"));
1744
42bfe59e
TW
1745 caller = btrace_call_get (&it);
1746
0b722aec 1747 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1748 pc = caller->insn.front ().pc;
0b722aec
MM
1749 else
1750 {
0860c437 1751 pc = caller->insn.back ().pc;
0b722aec
MM
1752 pc += gdb_insn_length (gdbarch, pc);
1753 }
1754
1755 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1756 btrace_get_bfun_name (bfun), bfun->level,
1757 core_addr_to_string_nz (pc));
1758
1759 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1760}
1761
1762/* Implement sniffer method for record_btrace_frame_unwind. */
1763
1764static int
1765record_btrace_frame_sniffer (const struct frame_unwind *self,
1766 struct frame_info *this_frame,
1767 void **this_cache)
1768{
0b722aec
MM
1769 const struct btrace_function *bfun;
1770 struct btrace_frame_cache *cache;
cecac1ab 1771 struct thread_info *tp;
0b722aec 1772 struct frame_info *next;
cecac1ab
MM
1773
1774 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1775 tp = inferior_thread ();
cecac1ab 1776
0b722aec
MM
1777 bfun = NULL;
1778 next = get_next_frame (this_frame);
1779 if (next == NULL)
1780 {
1781 const struct btrace_insn_iterator *replay;
1782
1783 replay = tp->btrace.replay;
1784 if (replay != NULL)
08c3f6d2 1785 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1786 }
1787 else
1788 {
1789 const struct btrace_function *callee;
42bfe59e 1790 struct btrace_call_iterator it;
0b722aec
MM
1791
1792 callee = btrace_get_frame_function (next);
42bfe59e
TW
1793 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1794 return 0;
1795
1796 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1797 return 0;
1798
1799 bfun = btrace_call_get (&it);
0b722aec
MM
1800 }
1801
1802 if (bfun == NULL)
1803 return 0;
1804
1805 DEBUG ("[frame] sniffed frame for %s on level %d",
1806 btrace_get_bfun_name (bfun), bfun->level);
1807
1808 /* This is our frame. Initialize the frame cache. */
1809 cache = bfcache_new (this_frame);
1810 cache->tp = tp;
1811 cache->bfun = bfun;
1812
1813 *this_cache = cache;
1814 return 1;
1815}
1816
1817/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1818
1819static int
1820record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1821 struct frame_info *this_frame,
1822 void **this_cache)
1823{
1824 const struct btrace_function *bfun, *callee;
1825 struct btrace_frame_cache *cache;
42bfe59e 1826 struct btrace_call_iterator it;
0b722aec 1827 struct frame_info *next;
42bfe59e 1828 struct thread_info *tinfo;
0b722aec
MM
1829
1830 next = get_next_frame (this_frame);
1831 if (next == NULL)
1832 return 0;
1833
1834 callee = btrace_get_frame_function (next);
1835 if (callee == NULL)
1836 return 0;
1837
1838 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1839 return 0;
1840
00431a78 1841 tinfo = inferior_thread ();
42bfe59e 1842 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1843 return 0;
1844
42bfe59e
TW
1845 bfun = btrace_call_get (&it);
1846
0b722aec
MM
1847 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1848 btrace_get_bfun_name (bfun), bfun->level);
1849
1850 /* This is our frame. Initialize the frame cache. */
1851 cache = bfcache_new (this_frame);
42bfe59e 1852 cache->tp = tinfo;
0b722aec
MM
1853 cache->bfun = bfun;
1854
1855 *this_cache = cache;
1856 return 1;
1857}
1858
1859static void
1860record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1861{
1862 struct btrace_frame_cache *cache;
1863 void **slot;
1864
19ba03f4 1865 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1866
1867 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1868 gdb_assert (slot != NULL);
1869
1870 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1871}
1872
1873/* btrace recording does not store previous memory contents, nor the
1874 contents of stack frames.  Any unwinding would return erroneous results as
1875 the stack contents no longer match the changed PC value restored from history.
1876 Therefore this unwinder reports any possibly unwound registers as
1877 <unavailable>. */
1878
0b722aec 1879const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1880{
1881 NORMAL_FRAME,
1882 record_btrace_frame_unwind_stop_reason,
1883 record_btrace_frame_this_id,
1884 record_btrace_frame_prev_register,
1885 NULL,
0b722aec
MM
1886 record_btrace_frame_sniffer,
1887 record_btrace_frame_dealloc_cache
1888};
1889
1890const struct frame_unwind record_btrace_tailcall_frame_unwind =
1891{
1892 TAILCALL_FRAME,
1893 record_btrace_frame_unwind_stop_reason,
1894 record_btrace_frame_this_id,
1895 record_btrace_frame_prev_register,
1896 NULL,
1897 record_btrace_tailcall_frame_sniffer,
1898 record_btrace_frame_dealloc_cache
cecac1ab 1899};
b2f4cfde 1900
f6ac5f3d 1901/* Implement the get_unwinder method. */
ac01945b 1902
f6ac5f3d
PA
1903const struct frame_unwind *
1904record_btrace_target::get_unwinder ()
ac01945b
TT
1905{
1906 return &record_btrace_frame_unwind;
1907}
1908
f6ac5f3d 1909/* Implement the get_tailcall_unwinder method. */
ac01945b 1910
f6ac5f3d
PA
1911const struct frame_unwind *
1912record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1913{
1914 return &record_btrace_tailcall_frame_unwind;
1915}
1916
987e68b1
MM
1917/* Return a human-readable string for FLAG. */
1918
1919static const char *
1920btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1921{
1922 switch (flag)
1923 {
1924 case BTHR_STEP:
1925 return "step";
1926
1927 case BTHR_RSTEP:
1928 return "reverse-step";
1929
1930 case BTHR_CONT:
1931 return "cont";
1932
1933 case BTHR_RCONT:
1934 return "reverse-cont";
1935
1936 case BTHR_STOP:
1937 return "stop";
1938 }
1939
1940 return "<invalid>";
1941}
1942
52834460
MM
1943/* Indicate that TP should be resumed according to FLAG. */
1944
1945static void
1946record_btrace_resume_thread (struct thread_info *tp,
1947 enum btrace_thread_flag flag)
1948{
1949 struct btrace_thread_info *btinfo;
1950
43792cf0 1951 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1952 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1953
1954 btinfo = &tp->btrace;
1955
52834460 1956 /* Fetch the latest branch trace. */
4a4495d6 1957 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1958
0ca912df
MM
1959 /* A resume request overwrites a preceding resume or stop request. */
1960 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1961 btinfo->flags |= flag;
1962}
1963
ec71cc2f
MM
1964/* Get the frame id of TP's current frame. */
1965
79b8d3b0
TT
1966static struct frame_id
1967get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1968{
79b8d3b0 1969 struct frame_id id;
ec71cc2f
MM
1970 int executing;
1971
00431a78
PA
1972 /* Set current thread, which is implicitly used by
1973 get_current_frame. */
1974 scoped_restore_current_thread restore_thread;
1975
1976 switch_to_thread (tp);
ec71cc2f
MM
1977
1978 /* Clear the executing flag to allow changes to the current frame.
1979 We are not actually running, yet. We just started a reverse execution
1980 command or a record goto command.
1981 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1982 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f
MM
1983 move the thread. Since we need to recompute the stack, we temporarily
1984 set EXECUTING to false. */
00431a78
PA
1985 executing = tp->executing;
1986 set_executing (inferior_ptid, false);
ec71cc2f 1987
79b8d3b0 1988 id = null_frame_id;
ec71cc2f
MM
1989 TRY
1990 {
79b8d3b0 1991 id = get_frame_id (get_current_frame ());
ec71cc2f
MM
1992 }
1993 CATCH (except, RETURN_MASK_ALL)
1994 {
1995 /* Restore the previous execution state. */
1996 set_executing (inferior_ptid, executing);
1997
ec71cc2f
MM
1998 throw_exception (except);
1999 }
2000 END_CATCH
2001
2002 /* Restore the previous execution state. */
2003 set_executing (inferior_ptid, executing);
2004
79b8d3b0 2005 return id;
ec71cc2f
MM
2006}
2007
52834460
MM
2008/* Start replaying a thread. */
2009
2010static struct btrace_insn_iterator *
2011record_btrace_start_replaying (struct thread_info *tp)
2012{
52834460
MM
2013 struct btrace_insn_iterator *replay;
2014 struct btrace_thread_info *btinfo;
52834460
MM
2015
2016 btinfo = &tp->btrace;
2017 replay = NULL;
2018
2019 /* We can't start replaying without trace. */
b54b03bd 2020 if (btinfo->functions.empty ())
52834460
MM
2021 return NULL;
2022
52834460
MM
2023 /* GDB stores the current frame_id when stepping in order to detect steps
2024 into subroutines.
2025 Since frames are computed differently when we're replaying, we need to
2026 recompute those stored frames and fix them up so we can still detect
2027 subroutines after we started replaying. */
492d29ea 2028 TRY
52834460 2029 {
52834460
MM
2030 struct frame_id frame_id;
2031 int upd_step_frame_id, upd_step_stack_frame_id;
2032
2033 /* The current frame without replaying - computed via normal unwind. */
79b8d3b0 2034 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2035
2036 /* Check if we need to update any stepping-related frame id's. */
2037 upd_step_frame_id = frame_id_eq (frame_id,
2038 tp->control.step_frame_id);
2039 upd_step_stack_frame_id = frame_id_eq (frame_id,
2040 tp->control.step_stack_frame_id);
2041
2042 /* We start replaying at the end of the branch trace. This corresponds
2043 to the current instruction. */
8d749320 2044 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2045 btrace_insn_end (replay, btinfo);
2046
31fd9caa
MM
2047 /* Skip gaps at the end of the trace. */
2048 while (btrace_insn_get (replay) == NULL)
2049 {
2050 unsigned int steps;
2051
2052 steps = btrace_insn_prev (replay, 1);
2053 if (steps == 0)
2054 error (_("No trace."));
2055 }
2056
52834460
MM
2057 /* We're not replaying, yet. */
2058 gdb_assert (btinfo->replay == NULL);
2059 btinfo->replay = replay;
2060
2061 /* Make sure we're not using any stale registers. */
00431a78 2062 registers_changed_thread (tp);
52834460
MM
2063
2064 /* The current frame with replaying - computed via btrace unwind. */
79b8d3b0 2065 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2066
2067 /* Replace stepping related frames where necessary. */
2068 if (upd_step_frame_id)
2069 tp->control.step_frame_id = frame_id;
2070 if (upd_step_stack_frame_id)
2071 tp->control.step_stack_frame_id = frame_id;
2072 }
492d29ea 2073 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2074 {
2075 xfree (btinfo->replay);
2076 btinfo->replay = NULL;
2077
00431a78 2078 registers_changed_thread (tp);
52834460
MM
2079
2080 throw_exception (except);
2081 }
492d29ea 2082 END_CATCH
52834460
MM
2083
2084 return replay;
2085}
2086
2087/* Stop replaying a thread. */
2088
2089static void
2090record_btrace_stop_replaying (struct thread_info *tp)
2091{
2092 struct btrace_thread_info *btinfo;
2093
2094 btinfo = &tp->btrace;
2095
2096 xfree (btinfo->replay);
2097 btinfo->replay = NULL;
2098
2099 /* Make sure we're not leaving any stale registers. */
00431a78 2100 registers_changed_thread (tp);
52834460
MM
2101}
2102
e3cfc1c7
MM
2103/* Stop replaying TP if it is at the end of its execution history. */
2104
2105static void
2106record_btrace_stop_replaying_at_end (struct thread_info *tp)
2107{
2108 struct btrace_insn_iterator *replay, end;
2109 struct btrace_thread_info *btinfo;
2110
2111 btinfo = &tp->btrace;
2112 replay = btinfo->replay;
2113
2114 if (replay == NULL)
2115 return;
2116
2117 btrace_insn_end (&end, btinfo);
2118
2119 if (btrace_insn_cmp (replay, &end) == 0)
2120 record_btrace_stop_replaying (tp);
2121}
2122
f6ac5f3d 2123/* The resume method of target record-btrace. */
b2f4cfde 2124
f6ac5f3d
PA
2125void
2126record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2127{
d2939ba2 2128 enum btrace_thread_flag flag, cflag;
52834460 2129
987e68b1 2130 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
f6ac5f3d 2131 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2132 step ? "step" : "cont");
52834460 2133
0ca912df
MM
2134 /* Store the execution direction of the last resume.
2135
f6ac5f3d 2136 If there is more than one resume call, we have to rely on infrun
0ca912df 2137 to not change the execution direction in between. */
f6ac5f3d 2138 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2139
0ca912df 2140 /* As long as we're not replaying, just forward the request.
52834460 2141
0ca912df
MM
2142 For non-stop targets this means that no thread is replaying. In order to
2143 make progress, we may need to explicitly move replaying threads to the end
2144 of their execution history. */
f6ac5f3d
PA
2145 if ((::execution_direction != EXEC_REVERSE)
2146 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2147 {
b6a8c27b 2148 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2149 return;
b2f4cfde
MM
2150 }
2151
52834460 2152 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2153 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2154 {
2155 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2156 cflag = BTHR_RCONT;
2157 }
52834460 2158 else
d2939ba2
MM
2159 {
2160 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2161 cflag = BTHR_CONT;
2162 }
52834460 2163
52834460 2164 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2165 record_btrace_wait below.
2166
2167 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2168 if (!target_is_non_stop_p ())
2169 {
26a57c92 2170 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2171
08036331
PA
2172 for (thread_info *tp : all_non_exited_threads (ptid))
2173 {
2174 if (tp->ptid.matches (inferior_ptid))
2175 record_btrace_resume_thread (tp, flag);
2176 else
2177 record_btrace_resume_thread (tp, cflag);
2178 }
d2939ba2
MM
2179 }
2180 else
2181 {
08036331
PA
2182 for (thread_info *tp : all_non_exited_threads (ptid))
2183 record_btrace_resume_thread (tp, flag);
d2939ba2 2184 }
70ad5bff
MM
2185
2186 /* Async support. */
2187 if (target_can_async_p ())
2188 {
6a3753b3 2189 target_async (1);
70ad5bff
MM
2190 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2191 }
52834460
MM
2192}
2193
f6ac5f3d 2194/* The commit_resume method of target record-btrace. */
85ad3aaf 2195
f6ac5f3d
PA
2196void
2197record_btrace_target::commit_resume ()
85ad3aaf 2198{
f6ac5f3d
PA
2199 if ((::execution_direction != EXEC_REVERSE)
2200 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2201 beneath ()->commit_resume ();
85ad3aaf
PA
2202}
2203
987e68b1
MM
2204/* Cancel resuming TP. */
2205
2206static void
2207record_btrace_cancel_resume (struct thread_info *tp)
2208{
2209 enum btrace_thread_flag flags;
2210
2211 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2212 if (flags == 0)
2213 return;
2214
43792cf0
PA
2215 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2216 print_thread_id (tp),
987e68b1
MM
2217 target_pid_to_str (tp->ptid), flags,
2218 btrace_thread_flag_to_str (flags));
2219
2220 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2221 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2222}
2223
2224/* Return a target_waitstatus indicating that we ran out of history. */
2225
2226static struct target_waitstatus
2227btrace_step_no_history (void)
2228{
2229 struct target_waitstatus status;
2230
2231 status.kind = TARGET_WAITKIND_NO_HISTORY;
2232
2233 return status;
2234}
2235
2236/* Return a target_waitstatus indicating that a step finished. */
2237
2238static struct target_waitstatus
2239btrace_step_stopped (void)
2240{
2241 struct target_waitstatus status;
2242
2243 status.kind = TARGET_WAITKIND_STOPPED;
2244 status.value.sig = GDB_SIGNAL_TRAP;
2245
2246 return status;
2247}
2248
6e4879f0
MM
2249/* Return a target_waitstatus indicating that a thread was stopped as
2250 requested. */
2251
2252static struct target_waitstatus
2253btrace_step_stopped_on_request (void)
2254{
2255 struct target_waitstatus status;
2256
2257 status.kind = TARGET_WAITKIND_STOPPED;
2258 status.value.sig = GDB_SIGNAL_0;
2259
2260 return status;
2261}
2262
d825d248
MM
2263/* Return a target_waitstatus indicating a spurious stop. */
2264
2265static struct target_waitstatus
2266btrace_step_spurious (void)
2267{
2268 struct target_waitstatus status;
2269
2270 status.kind = TARGET_WAITKIND_SPURIOUS;
2271
2272 return status;
2273}
2274
e3cfc1c7
MM
2275/* Return a target_waitstatus indicating that the thread was not resumed. */
2276
2277static struct target_waitstatus
2278btrace_step_no_resumed (void)
2279{
2280 struct target_waitstatus status;
2281
2282 status.kind = TARGET_WAITKIND_NO_RESUMED;
2283
2284 return status;
2285}
2286
2287/* Return a target_waitstatus indicating that we should wait again. */
2288
2289static struct target_waitstatus
2290btrace_step_again (void)
2291{
2292 struct target_waitstatus status;
2293
2294 status.kind = TARGET_WAITKIND_IGNORE;
2295
2296 return status;
2297}
2298
52834460
MM
2299/* Clear the record histories. */
2300
2301static void
2302record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2303{
2304 xfree (btinfo->insn_history);
2305 xfree (btinfo->call_history);
2306
2307 btinfo->insn_history = NULL;
2308 btinfo->call_history = NULL;
2309}
2310
3c615f99
MM
2311/* Check whether TP's current replay position is at a breakpoint. */
2312
2313static int
2314record_btrace_replay_at_breakpoint (struct thread_info *tp)
2315{
2316 struct btrace_insn_iterator *replay;
2317 struct btrace_thread_info *btinfo;
2318 const struct btrace_insn *insn;
3c615f99
MM
2319
2320 btinfo = &tp->btrace;
2321 replay = btinfo->replay;
2322
2323 if (replay == NULL)
2324 return 0;
2325
2326 insn = btrace_insn_get (replay);
2327 if (insn == NULL)
2328 return 0;
2329
00431a78 2330 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2331 &btinfo->stop_reason);
2332}
2333
d825d248 2334/* Step one instruction in forward direction. */
52834460
MM
2335
2336static struct target_waitstatus
d825d248 2337record_btrace_single_step_forward (struct thread_info *tp)
52834460 2338{
b61ce85c 2339 struct btrace_insn_iterator *replay, end, start;
52834460 2340 struct btrace_thread_info *btinfo;
52834460 2341
d825d248
MM
2342 btinfo = &tp->btrace;
2343 replay = btinfo->replay;
2344
2345 /* We're done if we're not replaying. */
2346 if (replay == NULL)
2347 return btrace_step_no_history ();
2348
011c71b6
MM
2349 /* Check if we're stepping a breakpoint. */
2350 if (record_btrace_replay_at_breakpoint (tp))
2351 return btrace_step_stopped ();
2352
b61ce85c
MM
2353 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2354 jump back to the instruction at which we started. */
2355 start = *replay;
d825d248
MM
2356 do
2357 {
2358 unsigned int steps;
2359
e3cfc1c7
MM
2360 /* We will bail out here if we continue stepping after reaching the end
2361 of the execution history. */
d825d248
MM
2362 steps = btrace_insn_next (replay, 1);
2363 if (steps == 0)
b61ce85c
MM
2364 {
2365 *replay = start;
2366 return btrace_step_no_history ();
2367 }
d825d248
MM
2368 }
2369 while (btrace_insn_get (replay) == NULL);
2370
2371 /* Determine the end of the instruction trace. */
2372 btrace_insn_end (&end, btinfo);
2373
e3cfc1c7
MM
2374 /* The execution trace contains (and ends with) the current instruction.
2375 This instruction has not been executed, yet, so the trace really ends
2376 one instruction earlier. */
d825d248 2377 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2378 return btrace_step_no_history ();
d825d248
MM
2379
2380 return btrace_step_spurious ();
2381}
2382
2383/* Step one instruction in backward direction. */
2384
2385static struct target_waitstatus
2386record_btrace_single_step_backward (struct thread_info *tp)
2387{
b61ce85c 2388 struct btrace_insn_iterator *replay, start;
d825d248 2389 struct btrace_thread_info *btinfo;
e59fa00f 2390
52834460
MM
2391 btinfo = &tp->btrace;
2392 replay = btinfo->replay;
2393
d825d248
MM
2394 /* Start replaying if we're not already doing so. */
2395 if (replay == NULL)
2396 replay = record_btrace_start_replaying (tp);
2397
2398 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2399 Skip gaps during replay. If we end up at a gap (at the beginning of
2400 the trace), jump back to the instruction at which we started. */
2401 start = *replay;
d825d248
MM
2402 do
2403 {
2404 unsigned int steps;
2405
2406 steps = btrace_insn_prev (replay, 1);
2407 if (steps == 0)
b61ce85c
MM
2408 {
2409 *replay = start;
2410 return btrace_step_no_history ();
2411 }
d825d248
MM
2412 }
2413 while (btrace_insn_get (replay) == NULL);
2414
011c71b6
MM
2415 /* Check if we're stepping a breakpoint.
2416
2417 For reverse-stepping, this check is after the step. There is logic in
2418 infrun.c that handles reverse-stepping separately. See, for example,
2419 proceed and adjust_pc_after_break.
2420
2421 This code assumes that for reverse-stepping, PC points to the last
2422 de-executed instruction, whereas for forward-stepping PC points to the
2423 next to-be-executed instruction. */
2424 if (record_btrace_replay_at_breakpoint (tp))
2425 return btrace_step_stopped ();
2426
d825d248
MM
2427 return btrace_step_spurious ();
2428}
2429
2430/* Step a single thread. */
2431
2432static struct target_waitstatus
2433record_btrace_step_thread (struct thread_info *tp)
2434{
2435 struct btrace_thread_info *btinfo;
2436 struct target_waitstatus status;
2437 enum btrace_thread_flag flags;
2438
2439 btinfo = &tp->btrace;
2440
6e4879f0
MM
2441 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2442 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2443
43792cf0 2444 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2445 target_pid_to_str (tp->ptid), flags,
2446 btrace_thread_flag_to_str (flags));
52834460 2447
6e4879f0
MM
2448 /* We can't step without an execution history. */
2449 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2450 return btrace_step_no_history ();
2451
52834460
MM
2452 switch (flags)
2453 {
2454 default:
2455 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2456
6e4879f0
MM
2457 case BTHR_STOP:
2458 return btrace_step_stopped_on_request ();
2459
52834460 2460 case BTHR_STEP:
d825d248
MM
2461 status = record_btrace_single_step_forward (tp);
2462 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2463 break;
52834460
MM
2464
2465 return btrace_step_stopped ();
2466
2467 case BTHR_RSTEP:
d825d248
MM
2468 status = record_btrace_single_step_backward (tp);
2469 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2470 break;
52834460
MM
2471
2472 return btrace_step_stopped ();
2473
2474 case BTHR_CONT:
e3cfc1c7
MM
2475 status = record_btrace_single_step_forward (tp);
2476 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2477 break;
52834460 2478
e3cfc1c7
MM
2479 btinfo->flags |= flags;
2480 return btrace_step_again ();
52834460
MM
2481
2482 case BTHR_RCONT:
e3cfc1c7
MM
2483 status = record_btrace_single_step_backward (tp);
2484 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2485 break;
52834460 2486
e3cfc1c7
MM
2487 btinfo->flags |= flags;
2488 return btrace_step_again ();
2489 }
d825d248 2490
f6ac5f3d 2491 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2492 method will stop the thread for which the event is reported. */
2493 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2494 btinfo->flags |= flags;
52834460 2495
e3cfc1c7 2496 return status;
b2f4cfde
MM
2497}
2498
a6b5be76
MM
2499/* Announce further events if necessary. */
2500
2501static void
53127008
SM
2502record_btrace_maybe_mark_async_event
2503 (const std::vector<thread_info *> &moving,
2504 const std::vector<thread_info *> &no_history)
a6b5be76 2505{
53127008
SM
2506 bool more_moving = !moving.empty ();
2507 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2508
2509 if (!more_moving && !more_no_history)
2510 return;
2511
2512 if (more_moving)
2513 DEBUG ("movers pending");
2514
2515 if (more_no_history)
2516 DEBUG ("no-history pending");
2517
2518 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2519}
2520
f6ac5f3d 2521/* The wait method of target record-btrace. */
b2f4cfde 2522
f6ac5f3d
PA
2523ptid_t
2524record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2525 int options)
b2f4cfde 2526{
53127008
SM
2527 std::vector<thread_info *> moving;
2528 std::vector<thread_info *> no_history;
52834460
MM
2529
2530 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2531
b2f4cfde 2532 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2533 if ((::execution_direction != EXEC_REVERSE)
2534 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2535 {
b6a8c27b 2536 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2537 }
2538
e3cfc1c7 2539 /* Keep a work list of moving threads. */
08036331
PA
2540 for (thread_info *tp : all_non_exited_threads (ptid))
2541 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2542 moving.push_back (tp);
e3cfc1c7 2543
53127008 2544 if (moving.empty ())
52834460 2545 {
e3cfc1c7 2546 *status = btrace_step_no_resumed ();
52834460 2547
e3cfc1c7 2548 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2549 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2550
e3cfc1c7 2551 return null_ptid;
52834460
MM
2552 }
2553
e3cfc1c7
MM
2554 /* Step moving threads one by one, one step each, until either one thread
2555 reports an event or we run out of threads to step.
2556
2557 When stepping more than one thread, chances are that some threads reach
2558 the end of their execution history earlier than others. If we reported
2559 this immediately, all-stop on top of non-stop would stop all threads and
2560 resume the same threads next time. And we would report the same thread
2561 having reached the end of its execution history again.
2562
2563 In the worst case, this would starve the other threads. But even if other
2564 threads would be allowed to make progress, this would result in far too
2565 many intermediate stops.
2566
2567 We therefore delay the reporting of "no execution history" until we have
2568 nothing else to report. By this time, all threads should have moved to
2569 either the beginning or the end of their execution history. There will
2570 be a single user-visible stop. */
53127008
SM
2571 struct thread_info *eventing = NULL;
2572 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2573 {
53127008 2574 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2575 {
53127008
SM
2576 thread_info *tp = moving[ix];
2577
e3cfc1c7
MM
2578 *status = record_btrace_step_thread (tp);
2579
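 /* IGNORE keeps TP in the work list; NO_HISTORY moves it to the deferred
    no-history list; anything else is the event we are going to report.  */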
2580 switch (status->kind)
2581 {
2582 case TARGET_WAITKIND_IGNORE:
2583 ix++;
2584 break;
2585
2586 case TARGET_WAITKIND_NO_HISTORY:
53127008 2587 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2588 break;
2589
2590 default:
53127008 2591 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2592 break;
2593 }
2594 }
2595 }
2596
2597 if (eventing == NULL)
2598 {
2599 /* We started with at least one moving thread. This thread must have
2600 either stopped or reached the end of its execution history.
2601
2602 In the former case, EVENTING must not be NULL.
2603 In the latter case, NO_HISTORY must not be empty. */
53127008 2604 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2605
2606 /* We kept threads moving at the end of their execution history. Stop
2607 EVENTING now that we are going to report its stop. */
53127008 2608 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2609 eventing->btrace.flags &= ~BTHR_MOVE;
2610
2611 *status = btrace_step_no_history ();
2612 }
2613
2614 gdb_assert (eventing != NULL);
2615
2616 /* We kept threads replaying at the end of their execution history. Stop
2617 replaying EVENTING now that we are going to report its stop. */
2618 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2619
2620 /* Stop all other threads. */
5953356c 2621 if (!target_is_non_stop_p ())
53127008 2622 {
08036331 2623 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2624 record_btrace_cancel_resume (tp);
2625 }
52834460 2626
a6b5be76
MM
2627 /* In async mode, we need to announce further events. */
2628 if (target_is_async_p ())
2629 record_btrace_maybe_mark_async_event (moving, no_history);
2630
52834460 2631 /* Start record histories anew from the current position. */
e3cfc1c7 2632 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2633
2634 /* We moved the replay position but did not update registers. */
00431a78 2635 registers_changed_thread (eventing);
e3cfc1c7 2636
43792cf0
PA
2637 DEBUG ("wait ended by thread %s (%s): %s",
2638 print_thread_id (eventing),
e3cfc1c7 2639 target_pid_to_str (eventing->ptid),
23fdd69e 2640 target_waitstatus_to_string (status).c_str ());
52834460 2641
e3cfc1c7 2642 return eventing->ptid;
52834460
MM
2643}
2644
f6ac5f3d 2645/* The stop method of target record-btrace. */
6e4879f0 2646
f6ac5f3d
PA
2647void
2648record_btrace_target::stop (ptid_t ptid)
6e4879f0
MM
2649{
2650 DEBUG ("stop %s", target_pid_to_str (ptid));
2651
2652 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2653 if ((::execution_direction != EXEC_REVERSE)
2654 && !record_is_replaying (minus_one_ptid))
6e4879f0 2655 {
b6a8c27b 2656 this->beneath ()->stop (ptid);
6e4879f0
MM
2657 }
2658 else
2659 {
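 /* While replaying, do not forward the stop request.  Just mark each
    requested thread to be stopped by the next wait.  */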
08036331
PA
2660 for (thread_info *tp : all_non_exited_threads (ptid))
2661 {
2662 tp->btrace.flags &= ~BTHR_MOVE;
2663 tp->btrace.flags |= BTHR_STOP;
2664 }
6e4879f0
MM
2665 }
2666 }
2667
f6ac5f3d 2668/* The can_execute_reverse method of target record-btrace. */
52834460 2669
57810aa7 2670bool
f6ac5f3d 2671record_btrace_target::can_execute_reverse ()
52834460 2672{
57810aa7 2673 return true;
52834460
MM
2674}
2675
f6ac5f3d 2676/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2677
57810aa7 2678bool
f6ac5f3d 2679record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2680{
f6ac5f3d 2681 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2682 {
2683 struct thread_info *tp = inferior_thread ();
2684
2685 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2686 }
2687
b6a8c27b 2688 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2689}
2690
f6ac5f3d 2691/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2692 record-btrace. */
2693
57810aa7 2694bool
f6ac5f3d 2695record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2696{
f6ac5f3d 2697 if (record_is_replaying (minus_one_ptid))
57810aa7 2698 return true;
9e8915c6 2699
b6a8c27b 2700 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2701}
2702
f6ac5f3d 2703 /* The stopped_by_hw_breakpoint method of target record-btrace. */
9e8915c6 2704
57810aa7 2705bool
f6ac5f3d 2706record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2707{
f6ac5f3d 2708 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2709 {
2710 struct thread_info *tp = inferior_thread ();
2711
2712 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2713 }
2714
b6a8c27b 2715 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2716}
2717
f6ac5f3d 2718/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2719 record-btrace. */
2720
57810aa7 2721bool
f6ac5f3d 2722record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2723{
f6ac5f3d 2724 if (record_is_replaying (minus_one_ptid))
57810aa7 2725 return true;
52834460 2726
b6a8c27b 2727 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2728}
2729
f6ac5f3d 2730/* The update_thread_list method of target record-btrace. */
e2887aa3 2731
f6ac5f3d
PA
2732void
2733record_btrace_target::update_thread_list ()
e2887aa3 2734{
e8032dde 2735 /* We don't add or remove threads during replay. */
f6ac5f3d 2736 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2737 return;
2738
2739 /* Forward the request. */
b6a8c27b 2740 this->beneath ()->update_thread_list ();
e2887aa3
MM
2741}
2742
f6ac5f3d 2743/* The thread_alive method of target record-btrace. */
e2887aa3 2744
57810aa7 2745bool
f6ac5f3d 2746record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2747{
2748 /* We don't add or remove threads during replay. */
f6ac5f3d 2749 if (record_is_replaying (minus_one_ptid))
00431a78 2750 return true;
e2887aa3
MM
2751
2752 /* Forward the request. */
b6a8c27b 2753 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2754}
2755
066ce621
MM
2756/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2757 is stopped. */
2758
2759static void
2760record_btrace_set_replay (struct thread_info *tp,
2761 const struct btrace_insn_iterator *it)
2762{
2763 struct btrace_thread_info *btinfo;
2764
2765 btinfo = &tp->btrace;
2766
a0f1b963 2767 if (it == NULL)
52834460 2768 record_btrace_stop_replaying (tp);
066ce621
MM
2769 else
2770 {
2771 if (btinfo->replay == NULL)
52834460 2772 record_btrace_start_replaying (tp);
066ce621
MM
2773 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2774 return;
2775
2776 *btinfo->replay = *it;
00431a78 2777 registers_changed_thread (tp);
066ce621
MM
2778 }
2779
52834460
MM
2780 /* Start anew from the new replay position. */
2781 record_btrace_clear_histories (btinfo);
485668e5 2782
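 /* Update the cached stop PC and show the new replay position.  */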
f2ffa92b
PA
2783 inferior_thread ()->suspend.stop_pc
2784 = regcache_read_pc (get_current_regcache ());
485668e5 2785 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2786}
2787
f6ac5f3d 2788/* The goto_record_begin method of target record-btrace. */
066ce621 2789
f6ac5f3d
PA
2790void
2791record_btrace_target::goto_record_begin ()
066ce621
MM
2792{
2793 struct thread_info *tp;
2794 struct btrace_insn_iterator begin;
2795
2796 tp = require_btrace_thread ();
2797
2798 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2799
2800 /* Skip gaps at the beginning of the trace. */
2801 while (btrace_insn_get (&begin) == NULL)
2802 {
2803 unsigned int steps;
2804
2805 steps = btrace_insn_next (&begin, 1);
2806 if (steps == 0)
2807 error (_("No trace."));
2808 }
2809
066ce621 2810 record_btrace_set_replay (tp, &begin);
066ce621
MM
2811}
2812
f6ac5f3d 2813/* The goto_record_end method of target record-btrace. */
066ce621 2814
f6ac5f3d
PA
2815void
2816record_btrace_target::goto_record_end ()
066ce621
MM
2817{
2818 struct thread_info *tp;
2819
2820 tp = require_btrace_thread ();
2821
2822 record_btrace_set_replay (tp, NULL);
066ce621
MM
2823}
2824
f6ac5f3d 2825/* The goto_record method of target record-btrace. */
066ce621 2826
f6ac5f3d
PA
2827void
2828record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2829{
2830 struct thread_info *tp;
2831 struct btrace_insn_iterator it;
2832 unsigned int number;
2833 int found;
2834
2835 number = insn;
2836
2837 /* Check for wrap-arounds. */
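 /* NUMBER is INSN narrowed to unsigned int; if the narrowing changed the
    value, the requested instruction number is out of range.  */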
2838 if (number != insn)
2839 error (_("Instruction number out of range."));
2840
2841 tp = require_btrace_thread ();
2842
2843 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2844
2845 /* Check if the instruction could not be found or is a gap. */
2846 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2847 error (_("No such instruction."));
2848
2849 record_btrace_set_replay (tp, &it);
066ce621
MM
2850}
2851
f6ac5f3d 2852/* The record_stop_replaying method of target record-btrace. */
797094dd 2853
f6ac5f3d
PA
2854void
2855record_btrace_target::record_stop_replaying ()
797094dd 2856{
08036331 2857 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2858 record_btrace_stop_replaying (tp);
2859}
2860
f6ac5f3d 2861/* The execution_direction target method. */
70ad5bff 2862
f6ac5f3d
PA
2863enum exec_direction_kind
2864record_btrace_target::execution_direction ()
70ad5bff
MM
2865{
2866 return record_btrace_resume_exec_dir;
2867}
2868
f6ac5f3d 2869/* The prepare_to_generate_core target method. */
aef92902 2870
f6ac5f3d
PA
2871void
2872record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2873{
2874 record_btrace_generating_corefile = 1;
2875}
2876
f6ac5f3d 2877/* The done_generating_core target method. */
aef92902 2878
f6ac5f3d
PA
2879void
2880record_btrace_target::done_generating_core ()
aef92902
MM
2881{
2882 record_btrace_generating_corefile = 0;
2883}
2884
f4abbc16
MM
2885/* Start recording in BTS format. */
2886
2887static void
cdb34d4a 2888cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2889{
f4abbc16
MM
2890 if (args != NULL && *args != 0)
2891 error (_("Invalid argument."));
2892
2893 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2894
492d29ea
PA
2895 TRY
2896 {
95a6b0a1 2897 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2898 }
2899 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2900 {
2901 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2902 throw_exception (exception);
2903 }
492d29ea 2904 END_CATCH
f4abbc16
MM
2905}
2906
bc504a31 2907/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2908
2909static void
cdb34d4a 2910cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2911{
2912 if (args != NULL && *args != 0)
2913 error (_("Invalid argument."));
2914
b20a6524 2915 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2916
492d29ea
PA
2917 TRY
2918 {
95a6b0a1 2919 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2920 }
2921 CATCH (exception, RETURN_MASK_ALL)
2922 {
2923 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2924 throw_exception (exception);
2925 }
2926 END_CATCH
afedecd3
MM
2927}
2928
b20a6524
MM
2929/* Alias for "target record". */
2930
2931static void
981a3fb3 2932cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2933{
2934 if (args != NULL && *args != 0)
2935 error (_("Invalid argument."));
2936
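 /* Prefer Intel Processor Trace; if starting the target fails, fall back to
    BTS before giving up.  */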
2937 record_btrace_conf.format = BTRACE_FORMAT_PT;
2938
2939 TRY
2940 {
95a6b0a1 2941 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2942 }
2943 CATCH (exception, RETURN_MASK_ALL)
2944 {
2945 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2946
2947 TRY
2948 {
95a6b0a1 2949 execute_command ("target record-btrace", from_tty);
b20a6524 2950 }
b926417a 2951 CATCH (ex, RETURN_MASK_ALL)
b20a6524
MM
2952 {
2953 record_btrace_conf.format = BTRACE_FORMAT_NONE;
b926417a 2954 throw_exception (ex);
b20a6524
MM
2955 }
2956 END_CATCH
2957 }
2958 END_CATCH
2959}
2960
67b5c0c1
MM
2961/* The "set record btrace" command. */
2962
2963static void
981a3fb3 2964cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2965{
b85310e1
MM
2966 printf_unfiltered (_("\"set record btrace\" must be followed "
2967 "by an appropriate subcommand.\n"));
2968 help_list (set_record_btrace_cmdlist, "set record btrace ",
2969 all_commands, gdb_stdout);
67b5c0c1
MM
2970}
2971
2972/* The "show record btrace" command. */
2973
2974static void
981a3fb3 2975cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2976{
2977 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2978}
2979
2980/* The "show record btrace replay-memory-access" command. */
2981
2982static void
2983cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2984 struct cmd_list_element *c, const char *value)
2985{
2986 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2987 replay_memory_access);
2988}
2989
4a4495d6
MM
2990/* The "set record btrace cpu none" command. */
2991
2992static void
2993cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2994{
2995 if (args != nullptr && *args != 0)
2996 error (_("Trailing junk: '%s'."), args);
2997
2998 record_btrace_cpu_state = CS_NONE;
2999}
3000
3001/* The "set record btrace cpu auto" command. */
3002
3003static void
3004cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3005{
3006 if (args != nullptr && *args != 0)
3007 error (_("Trailing junk: '%s'."), args);
3008
3009 record_btrace_cpu_state = CS_AUTO;
3010}
3011
3012/* The "set record btrace cpu" command. */
3013
3014static void
3015cmd_set_record_btrace_cpu (const char *args, int from_tty)
3016{
3017 if (args == nullptr)
3018 args = "";
3019
3020 /* We use a hard-coded vendor string for now. */
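 /* Examples (illustrative values): "intel: 6/158" or "intel: 6/158/9".  */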
3021 unsigned int family, model, stepping;
3022 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3023 &model, &l1, &stepping, &l2);
3024 if (matches == 3)
3025 {
3026 if (strlen (args) != l2)
3027 error (_("Trailing junk: '%s'."), args + l2);
3028 }
3029 else if (matches == 2)
3030 {
3031 if (strlen (args) != l1)
3032 error (_("Trailing junk: '%s'."), args + l1);
3033
3034 stepping = 0;
3035 }
3036 else
3037 error (_("Bad format. See \"help set record btrace cpu\"."));
3038
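 /* Reject values that would not fit the narrower fields of
    record_btrace_cpu.  */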
3039 if (USHRT_MAX < family)
3040 error (_("Cpu family too big."));
3041
3042 if (UCHAR_MAX < model)
3043 error (_("Cpu model too big."));
3044
3045 if (UCHAR_MAX < stepping)
3046 error (_("Cpu stepping too big."));
3047
3048 record_btrace_cpu.vendor = CV_INTEL;
3049 record_btrace_cpu.family = family;
3050 record_btrace_cpu.model = model;
3051 record_btrace_cpu.stepping = stepping;
3052
3053 record_btrace_cpu_state = CS_CPU;
3054}
3055
3056/* The "show record btrace cpu" command. */
3057
3058static void
3059cmd_show_record_btrace_cpu (const char *args, int from_tty)
3060{
4a4495d6
MM
3061 if (args != nullptr && *args != 0)
3062 error (_("Trailing junk: '%s'."), args);
3063
3064 switch (record_btrace_cpu_state)
3065 {
3066 case CS_AUTO:
3067 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3068 return;
3069
3070 case CS_NONE:
3071 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3072 return;
3073
3074 case CS_CPU:
3075 switch (record_btrace_cpu.vendor)
3076 {
3077 case CV_INTEL:
3078 if (record_btrace_cpu.stepping == 0)
3079 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3080 record_btrace_cpu.family,
3081 record_btrace_cpu.model);
3082 else
3083 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3084 record_btrace_cpu.family,
3085 record_btrace_cpu.model,
3086 record_btrace_cpu.stepping);
3087 return;
3088 }
3089 }
3090
3091 error (_("Internal error: bad cpu state."));
3092}
3093
3094/* The "set record btrace bts" command. */
d33501a5
MM
3095
3096static void
981a3fb3 3097cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3098{
3099 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3100 "by an appropriate subcommand.\n"));
d33501a5
MM
3101 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3102 all_commands, gdb_stdout);
3103}
3104
3105/* The "show record btrace bts" command. */
3106
3107static void
981a3fb3 3108cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3109{
3110 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3111}
3112
b20a6524
MM
3113/* The "set record btrace pt" command. */
3114
3115static void
981a3fb3 3116cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3117{
3118 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3119 "by an appropriate subcommand.\n"));
3120 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3121 all_commands, gdb_stdout);
3122}
3123
3124/* The "show record btrace pt" command. */
3125
3126static void
981a3fb3 3127cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3128{
3129 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3130}
3131
3132/* The "record bts buffer-size" show value function. */
3133
3134static void
3135show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3136 struct cmd_list_element *c,
3137 const char *value)
3138{
3139 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3140 value);
3141}
3142
3143/* The "record pt buffer-size" show value function. */
3144
3145static void
3146show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3147 struct cmd_list_element *c,
3148 const char *value)
3149{
3150 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3151 value);
3152}
3153
afedecd3
MM
3154/* Initialize btrace commands. */
3155
3156void
3157_initialize_record_btrace (void)
3158{
f4abbc16
MM
3159 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3160 _("Start branch trace recording."), &record_btrace_cmdlist,
3161 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3162 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3163
f4abbc16
MM
3164 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3165 _("\
3166Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3167The processor stores a from/to record for each branch into a cyclic buffer.\n\
3168This format may not be available on all processors."),
3169 &record_btrace_cmdlist);
3170 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3171
b20a6524
MM
3172 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3173 _("\
bc504a31 3174Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3175This format may not be available on all processors."),
3176 &record_btrace_cmdlist);
3177 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3178
67b5c0c1
MM
3179 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3180 _("Set record options"), &set_record_btrace_cmdlist,
3181 "set record btrace ", 0, &set_record_cmdlist);
3182
3183 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3184 _("Show record options"), &show_record_btrace_cmdlist,
3185 "show record btrace ", 0, &show_record_cmdlist);
3186
3187 add_setshow_enum_cmd ("replay-memory-access", no_class,
3188 replay_memory_access_types, &replay_memory_access, _("\
3189Set what memory accesses are allowed during replay."), _("\
3190Show what memory accesses are allowed during replay."),
3191 _("Default is READ-ONLY.\n\n\
3192The btrace record target does not trace data.\n\
3193The memory therefore corresponds to the live target and not \
3194to the current replay position.\n\n\
3195When READ-ONLY, allow accesses to read-only memory during replay.\n\
3196When READ-WRITE, allow accesses to read-only and read-write memory during \
3197replay."),
3198 NULL, cmd_show_replay_memory_access,
3199 &set_record_btrace_cmdlist,
3200 &show_record_btrace_cmdlist);
3201
4a4495d6
MM
3202 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3203 _("\
3204Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3205The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3206For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3207When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3208The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3209When GDB does not support that cpu, this option can be used to enable\n\
3210workarounds for a similar cpu that GDB supports.\n\n\
3211When set to \"none\", errata workarounds are disabled."),
3212 &set_record_btrace_cpu_cmdlist,
3213 _("set record btrace cpu "), 1,
3214 &set_record_btrace_cmdlist);
3215
3216 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3217Automatically determine the cpu to be used for trace decode."),
3218 &set_record_btrace_cpu_cmdlist);
3219
3220 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3221Do not enable errata workarounds for trace decode."),
3222 &set_record_btrace_cpu_cmdlist);
3223
3224 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3225Show the cpu to be used for trace decode."),
3226 &show_record_btrace_cmdlist);
3227
d33501a5
MM
3228 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3229 _("Set record btrace bts options"),
3230 &set_record_btrace_bts_cmdlist,
3231 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3232
3233 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3234 _("Show record btrace bts options"),
3235 &show_record_btrace_bts_cmdlist,
3236 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3237
3238 add_setshow_uinteger_cmd ("buffer-size", no_class,
3239 &record_btrace_conf.bts.size,
3240 _("Set the record/replay bts buffer size."),
3241 _("Show the record/replay bts buffer size."), _("\
3242When starting recording, request a trace buffer of this size. \
3243The actual buffer size may differ from the requested size. \
3244Use \"info record\" to see the actual buffer size.\n\n\
3245Bigger buffers allow longer recording but also take more time to process \
3246the recorded execution trace.\n\n\
b20a6524
MM
3247The trace buffer size may not be changed while recording."), NULL,
3248 show_record_bts_buffer_size_value,
d33501a5
MM
3249 &set_record_btrace_bts_cmdlist,
3250 &show_record_btrace_bts_cmdlist);
3251
b20a6524
MM
3252 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3253 _("Set record btrace pt options"),
3254 &set_record_btrace_pt_cmdlist,
3255 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3256
3257 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3258 _("Show record btrace pt options"),
3259 &show_record_btrace_pt_cmdlist,
3260 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3261
3262 add_setshow_uinteger_cmd ("buffer-size", no_class,
3263 &record_btrace_conf.pt.size,
3264 _("Set the record/replay pt buffer size."),
3265 _("Show the record/replay pt buffer size."), _("\
3266Bigger buffers allow longer recording but also take more time to process \
3267the recorded execution.\n\
3268The actual buffer size may differ from the requested size. Use \"info record\" \
3269to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3270 &set_record_btrace_pt_cmdlist,
3271 &show_record_btrace_pt_cmdlist);
3272
d9f719f1 3273 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3274
3275 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3276 xcalloc, xfree);
d33501a5
MM
3277
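 /* Default trace buffer sizes; these may be changed with the buffer-size
    commands above before recording is started.  */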
3278 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3279 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3280}