]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
gdb/testsuite: Fix pretty-print.exp on big-endian platforms
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
42a4f53d 3 Copyright (C) 2013-2019 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
268a13a5 41#include "gdbsupport/vec.h"
00431a78 42#include "inferior.h"
325fac50 43#include <algorithm>
0d12e84c 44#include "gdbarch.h"
afedecd3 45
d9f719f1
PA
/* Descriptor for the record-btrace target: the name used with "target
   record-btrace" and the short/long documentation shown by "help target".  */
static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};
51
/* The target_ops of record-btrace.  Sits at the record stratum on top of
   the process target and serves execution history collected via branch
   tracing (BTS or Intel PT).  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  /* Generic record-layer requests are forwarded to the record helpers.  */
  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  /* History browsing ("record instruction-history" / "record
     function-call-history").  */
  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  /* Replay state queries and control.  */
  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  /* Execution control while replaying.  */
  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};
140
/* The single global instance of the record-btrace target.  */
static record_btrace_target record_btrace_ops;
142
143/* Initialize the record-btrace target ops. */
afedecd3 144
76727919
TT
145/* Token associated with a new-thread observer enabling branch tracing
146 for the new thread. */
3dcfdc58 147static const gdb::observers::token record_btrace_thread_observer_token {};
afedecd3 148
67b5c0c1
MM
149/* Memory access types used in set/show record btrace replay-memory-access. */
150static const char replay_memory_access_read_only[] = "read-only";
151static const char replay_memory_access_read_write[] = "read-write";
152static const char *const replay_memory_access_types[] =
153{
154 replay_memory_access_read_only,
155 replay_memory_access_read_write,
156 NULL
157};
158
159/* The currently allowed replay memory access type. */
160static const char *replay_memory_access = replay_memory_access_read_only;
161
4a4495d6
MM
162/* The cpu state kinds. */
163enum record_btrace_cpu_state_kind
164{
165 CS_AUTO,
166 CS_NONE,
167 CS_CPU
168};
169
170/* The current cpu state. */
171static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
172
173/* The current cpu for trace decode. */
174static struct btrace_cpu record_btrace_cpu;
175
67b5c0c1
MM
176/* Command lists for "set/show record btrace". */
177static struct cmd_list_element *set_record_btrace_cmdlist;
178static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 179
70ad5bff
MM
180/* The execution direction of the last resume we got. See record-full.c. */
181static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
182
183/* The async event handler for reverse/replay execution. */
184static struct async_event_handler *record_btrace_async_inferior_event_handler;
185
aef92902
MM
186/* A flag indicating that we are currently generating a core file. */
187static int record_btrace_generating_corefile;
188
f4abbc16
MM
189/* The current branch trace configuration. */
190static struct btrace_config record_btrace_conf;
191
192/* Command list for "record btrace". */
193static struct cmd_list_element *record_btrace_cmdlist;
194
d33501a5
MM
195/* Command lists for "set/show record btrace bts". */
196static struct cmd_list_element *set_record_btrace_bts_cmdlist;
197static struct cmd_list_element *show_record_btrace_bts_cmdlist;
198
b20a6524
MM
199/* Command lists for "set/show record btrace pt". */
200static struct cmd_list_element *set_record_btrace_pt_cmdlist;
201static struct cmd_list_element *show_record_btrace_pt_cmdlist;
202
4a4495d6
MM
203/* Command list for "set record btrace cpu". */
204static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
205
afedecd3
MM
206/* Print a record-btrace debug message. Use do ... while (0) to avoid
207 ambiguities when used in if statements. */
208
209#define DEBUG(msg, args...) \
210 do \
211 { \
212 if (record_debug != 0) \
213 fprintf_unfiltered (gdb_stdlog, \
214 "[record-btrace] " msg "\n", ##args); \
215 } \
216 while (0)
217
218
4a4495d6
MM
219/* Return the cpu configured by the user. Returns NULL if the cpu was
220 configured as auto. */
221const struct btrace_cpu *
222record_btrace_get_cpu (void)
223{
224 switch (record_btrace_cpu_state)
225 {
226 case CS_AUTO:
227 return nullptr;
228
229 case CS_NONE:
230 record_btrace_cpu.vendor = CV_UNKNOWN;
231 /* Fall through. */
232 case CS_CPU:
233 return &record_btrace_cpu;
234 }
235
236 error (_("Internal error: bad record btrace cpu state."));
237}
238
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  /* Fetching the trace reads registers for the current position; make sure
     that is currently permitted.  */
  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
264
265/* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
267
268 Throws an error if there is no thread or no trace. This function never
269 returns NULL. */
270
271static struct btrace_thread_info *
272require_btrace (void)
273{
274 struct thread_info *tp;
275
276 tp = require_btrace_thread ();
277
278 return &tp->btrace;
afedecd3
MM
279}
280
281/* Enable branch tracing for one thread. Warn on errors. */
282
283static void
284record_btrace_enable_warn (struct thread_info *tp)
285{
a70b8144 286 try
492d29ea
PA
287 {
288 btrace_enable (tp, &record_btrace_conf);
289 }
230d2906 290 catch (const gdb_exception_error &error)
492d29ea 291 {
3d6e9d23 292 warning ("%s", error.what ());
492d29ea 293 }
afedecd3
MM
294}
295
afedecd3
MM
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* The token identifies our observer so it can be detached again in
     record_btrace_auto_disable.  */
  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}
306
/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}
316
70ad5bff
MM
/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  /* Drive the normal inferior-event machinery once for replay stepping.  */
  inferior_event_handler (INF_REG_EVENT, NULL);
}
324
c0272db5
TW
325/* See record-btrace.h. */
326
327void
328record_btrace_push_target (void)
329{
330 const char *format;
331
332 record_btrace_auto_enable ();
333
334 push_target (&record_btrace_ops);
335
336 record_btrace_async_inferior_event_handler
337 = create_async_event_handler (record_btrace_handle_async_inferior_event,
338 NULL);
339 record_btrace_generating_corefile = 0;
340
341 format = btrace_format_short_string (record_btrace_conf.format);
76727919 342 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
343}
344
228f1508
SM
/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  /* Disable btrace for every thread still registered.  */
  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  /* Register THREAD to have btrace disabled on destruction.  */
  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  /* Forget all registered threads; after this the destructor is a no-op.
     Called on success to keep tracing enabled.  */
  void discard ()
  {
    m_threads.clear ();
  }

private:
  /* The threads for which btrace was enabled and not yet committed.  */
  std::forward_list<thread_info *> m_threads;
};
372
/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  /* ARGS is an optional thread-number list; an empty list means all
     threads.  */
  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  /* Success: keep tracing enabled on all threads.  */
  btrace_disable.discard ();
}
401
/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  /* Only disable tracing on threads we actually trace.  */
  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
415
/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  /* Grab the target beneath before unpushing invalidates THIS.  */
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}
430
/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}
448
f6ac5f3d 449/* The async method of target record-btrace. */
b7d2e916 450
f6ac5f3d
PA
451void
452record_btrace_target::async (int enable)
b7d2e916 453{
6a3753b3 454 if (enable)
b7d2e916
PA
455 mark_async_event_handler (record_btrace_async_inferior_event_handler);
456 else
457 clear_async_event_handler (record_btrace_async_inferior_event_handler);
458
b6a8c27b 459 this->beneath ()->async (enable);
b7d2e916
PA
460}
461
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1 GiB/MiB/KiB, scale it down to that
   unit and return the matching suffix; otherwise leave *SIZE untouched
   and return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }

  if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }

  if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
489
490/* Print a BTS configuration. */
491
492static void
493record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
494{
495 const char *suffix;
496 unsigned int size;
497
498 size = conf->size;
499 if (size > 0)
500 {
501 suffix = record_btrace_adjust_size (&size);
502 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
503 }
504}
505
bc504a31 506/* Print an Intel Processor Trace configuration. */
b20a6524
MM
507
508static void
509record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
510{
511 const char *suffix;
512 unsigned int size;
513
514 size = conf->size;
515 if (size > 0)
516 {
517 suffix = record_btrace_adjust_size (&size);
518 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
519 }
520}
521
d33501a5
MM
522/* Print a branch tracing configuration. */
523
524static void
525record_btrace_print_conf (const struct btrace_config *conf)
526{
527 printf_unfiltered (_("Recording format: %s.\n"),
528 btrace_format_string (conf->format));
529
530 switch (conf->format)
531 {
532 case BTRACE_FORMAT_NONE:
533 return;
534
535 case BTRACE_FORMAT_BTS:
536 record_btrace_print_bts_conf (&conf->bts);
537 return;
b20a6524
MM
538
539 case BTRACE_FORMAT_PT:
540 record_btrace_print_pt_conf (&conf->pt);
541 return;
d33501a5
MM
542 }
543
544 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
545}
546
/* The info_record method of target record-btrace.  Prints the recording
   format, instruction/function/gap counts and, if replaying, the current
   replay position.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  /* "::" selects the global btrace_conf over the target method.  */
  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Function count: number of the last call segment.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
606
31fd9caa
MM
/* Print a decode error.  ERRCODE is interpreted relative to FORMAT; for
   Intel PT, positive codes are informational notifications rather than
   errors.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}
626
f94cc897
MM
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end <= line)
    /* END is exclusive, so it must extend one past LINE for LINE to be
       part of the range.  The previous code set END to LINE, which
       silently dropped the highest added line from the range.  */
    range.end = line + 1;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}
692
/* Find the line range associated with PC.  Collects every non-zero source
   line whose line-table entry starts exactly at PC.  Returns an empty range
   (with or without a symtab) when nothing is known about PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the loop deliberately stops before the last entry —
     presumably that entry is an end-of-sequence marker; confirm against
     the linetable documentation.  */
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
726
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  /* LINES.END is exclusive.  */
  for (int line = lines.begin; line < lines.end; ++line)
    {
      /* Close the previous line's emitters before opening new ones; the
	 list must be reset before the tuple it lives in is replaced.  */
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Left open so the caller can append instructions for this line.  */
      asm_list->emplace (uiout, "line_asm_insn");
    }
}
758
afedecd3
MM
/* Disassemble a section of the recorded instruction trace.  Iterates over
   [BEGIN; END), interleaving source lines when DISASSEMBLY_SOURCE is set and
   printing decode errors for gaps.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  /* Always mark speculatively executed instructions.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  /* Lazily-opened emitters for the current source line tuple and its
     instruction list; managed here and inside btrace_print_lines.  */
  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      /* Only print source lines we have not printed yet.  */
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}
848
/* The insn_history method of target record-btrace.  SIZE gives the number
   of instructions to show; its sign selects the direction (negative means
   backward).  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously shown history window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the window so a subsequent request continues from here.  */
  btrace_set_insn_history (btinfo, &begin, &end);
}
931
/* The insn_history_range method of target record-btrace.  Shows the
   instructions numbered [FROM; TO], both inclusive.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  /* Instruction numbers are unsigned int internally; narrowing may wrap.  */
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}
979
/* The insn_history_from method of target record-btrace.  Shows SIZE
   instructions starting (or, for negative SIZE, ending) at instruction
   number FROM.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      /* FROM is the last instruction shown.  */
      end = from;

      /* Clamp at the start of the trace instead of wrapping below zero.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
1013
/* Print the instruction number range for a function call history line.
   Prints "BEGIN,END" with both numbers inclusive.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  /* A function segment always contains at least one instruction.  */
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}
1032
ce0dfbea
MM
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   If nothing can be mapped, *PBEGIN is left at INT_MAX and *PEND at
   INT_MIN, i.e. *PEND < *PBEGIN signals "no line info" to the caller.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      /* Skip lines from other symtabs (inlining/macros) or without line
	 information.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
1071
afedecd3
MM
1072/* Print the source line information for a function call history line. */
1073
1074static void
23a7fe75
MM
1075btrace_call_history_src_line (struct ui_out *uiout,
1076 const struct btrace_function *bfun)
afedecd3
MM
1077{
1078 struct symbol *sym;
23a7fe75 1079 int begin, end;
afedecd3
MM
1080
1081 sym = bfun->sym;
1082 if (sym == NULL)
1083 return;
1084
112e8700 1085 uiout->field_string ("file",
cbe56571
TT
1086 symtab_to_filename_for_display (symbol_symtab (sym)),
1087 ui_out_style_kind::FILE);
afedecd3 1088
ce0dfbea 1089 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1090 if (end < begin)
afedecd3
MM
1091 return;
1092
112e8700 1093 uiout->text (":");
381befee 1094 uiout->field_signed ("min line", begin);
afedecd3 1095
23a7fe75 1096 if (end == begin)
afedecd3
MM
1097 return;
1098
112e8700 1099 uiout->text (",");
381befee 1100 uiout->field_signed ("max line", end);
afedecd3
MM
1101}
1102
0b722aec
MM
1103/* Get the name of a branch trace function. */
1104
1105static const char *
1106btrace_get_bfun_name (const struct btrace_function *bfun)
1107{
1108 struct minimal_symbol *msym;
1109 struct symbol *sym;
1110
1111 if (bfun == NULL)
1112 return "??";
1113
1114 msym = bfun->msym;
1115 sym = bfun->sym;
1116
1117 if (sym != NULL)
1118 return SYMBOL_PRINT_NAME (sym);
1119 else if (msym != NULL)
efd66ac6 1120 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1121 else
1122 return "??";
1123}
1124
/* Print a section of the recorded function-call history in [BEGIN; END).
   INT_FLAGS is a record_print_flags mask selecting the optional columns.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  A gap segment carries an error code
	 instead of instructions; decode and print it, then move on.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth; BTINFO->LEVEL normalizes the minimum level
	 to zero.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      /* Prefer the debug symbol name, then the minimal symbol name.  For
	 MI, omit the field entirely when the name is unknown.  */
      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
			     ui_out_style_kind::FUNCTION);
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
			     ui_out_style_kind::FUNCTION);
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     ui_out_style_kind::FUNCTION);

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
1202
/* The call_history method of target record-btrace.

   Print SIZE function-call-history lines, continuing from the previously
   printed window if one exists, otherwise starting at the replay position
   (or the trace tail when not replaying).  A negative SIZE prints
   backwards.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple id "insn history" looks copy-pasted from the
     instruction-history code; MI consumers may depend on it — confirm
     before changing.  */
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previously printed window.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed window for the next continuation request.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
1288
/* The call_history_range method of target record-btrace.

   Print the function-call history for the inclusive range [FROM; TO].
   Errors out when FROM is out of bounds; silently truncates TO at the end
   of the trace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds caused by narrowing ULONGEST to unsigned.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  /* Remember the printed window for continuation requests.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
1336
f6ac5f3d 1337/* The call_history_from method of target record-btrace. */
afedecd3 1338
f6ac5f3d
PA
1339void
1340record_btrace_target::call_history_from (ULONGEST from, int size,
1341 record_print_flags flags)
afedecd3
MM
1342{
1343 ULONGEST begin, end, context;
1344
1345 context = abs (size);
0688d04e
MM
1346 if (context == 0)
1347 error (_("Bad record function-call-history-size."));
afedecd3
MM
1348
1349 if (size < 0)
1350 {
1351 end = from;
1352
1353 if (from < context)
1354 begin = 0;
1355 else
0688d04e 1356 begin = from - context + 1;
afedecd3
MM
1357 }
1358 else
1359 {
1360 begin = from;
0688d04e 1361 end = from + context - 1;
afedecd3
MM
1362
1363 /* Check for wrap-around. */
1364 if (end < begin)
1365 end = ULONGEST_MAX;
1366 }
1367
f6ac5f3d 1368 call_history_range ( begin, end, flags);
afedecd3
MM
1369}
1370
f6ac5f3d 1371/* The record_method method of target record-btrace. */
b158a20f 1372
f6ac5f3d
PA
1373enum record_method
1374record_btrace_target::record_method (ptid_t ptid)
b158a20f 1375{
b158a20f
TW
1376 struct thread_info * const tp = find_thread_ptid (ptid);
1377
1378 if (tp == NULL)
1379 error (_("No thread."));
1380
1381 if (tp->btrace.target == NULL)
1382 return RECORD_METHOD_NONE;
1383
1384 return RECORD_METHOD_BTRACE;
1385}
1386
/* The record_is_replaying method of target record-btrace.

   Returns true if any non-exited thread matching PTID is currently
   replaying its execution history.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  for (thread_info *tp : all_non_exited_threads (ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}
1398
f6ac5f3d 1399/* The record_will_replay method of target record-btrace. */
7ff27e9b 1400
57810aa7 1401bool
f6ac5f3d 1402record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1403{
f6ac5f3d 1404 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1405}
1406
/* The xfer_partial method of target record-btrace.

   While replaying with read-only memory access, reject memory writes and
   reads of non-read-only memory (the recorded trace does not contain
   memory contents); otherwise forward the request to the target beneath.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section and
		       fall through to the forwarded read below.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    /* Not read-only memory: unavailable during replay.  */
	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
1457
/* The insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the target
   beneath can patch memory; the previous setting is restored on both the
   normal and the exceptional path.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}
1486
/* The remove_breakpoint method of target record-btrace.

   Mirror image of insert_breakpoint: temporarily allows memory writes so
   the target beneath can restore the original bytes, restoring the access
   mode on every exit path.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      /* Restore the access mode before propagating the error.  */
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}
1516
f6ac5f3d 1517/* The fetch_registers method of target record-btrace. */
1f3ef581 1518
f6ac5f3d
PA
1519void
1520record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1521{
1522 struct btrace_insn_iterator *replay;
1523 struct thread_info *tp;
1524
222312d3 1525 tp = find_thread_ptid (regcache->ptid ());
1f3ef581
MM
1526 gdb_assert (tp != NULL);
1527
1528 replay = tp->btrace.replay;
aef92902 1529 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1530 {
1531 const struct btrace_insn *insn;
1532 struct gdbarch *gdbarch;
1533 int pcreg;
1534
ac7936df 1535 gdbarch = regcache->arch ();
1f3ef581
MM
1536 pcreg = gdbarch_pc_regnum (gdbarch);
1537 if (pcreg < 0)
1538 return;
1539
1540 /* We can only provide the PC register. */
1541 if (regno >= 0 && regno != pcreg)
1542 return;
1543
1544 insn = btrace_insn_get (replay);
1545 gdb_assert (insn != NULL);
1546
73e1c03f 1547 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1548 }
1549 else
b6a8c27b 1550 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1551}
1552
/* The store_registers method of target record-btrace.

   Register writes are refused while replaying (the recorded history cannot
   be modified); otherwise the request is forwarded to the target beneath.  */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers);

  this->beneath ()->store_registers (regcache, regno);
}
1566
/* The prepare_to_store method of target record-btrace.

   A no-op while replaying (stores are refused anyway); otherwise forwarded
   to the target beneath.  */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}
1578
/* The branch trace frame cache.  Associates a frame with the traced
   function segment it corresponds to.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by the frame pointer
   (see bfcache_hash/bfcache_eq).  */

static htab_t bfcache;
1596
1597/* hash_f for htab_create_alloc of bfcache. */
1598
1599static hashval_t
1600bfcache_hash (const void *arg)
1601{
19ba03f4
SM
1602 const struct btrace_frame_cache *cache
1603 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1604
1605 return htab_hash_pointer (cache->frame);
1606}
1607
1608/* eq_f for htab_create_alloc of bfcache. */
1609
1610static int
1611bfcache_eq (const void *arg1, const void *arg2)
1612{
19ba03f4
SM
1613 const struct btrace_frame_cache *cache1
1614 = (const struct btrace_frame_cache *) arg1;
1615 const struct btrace_frame_cache *cache2
1616 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1617
1618 return cache1->frame == cache2->frame;
1619}
1620
/* Create a new btrace frame cache entry for FRAME, allocated on the frame
   obstack, and register it in the bfcache hash table.  Asserts that FRAME
   was not already cached.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1638
1639/* Extract the branch trace function from a branch trace frame. */
1640
1641static const struct btrace_function *
1642btrace_get_frame_function (struct frame_info *frame)
1643{
1644 const struct btrace_frame_cache *cache;
0b722aec
MM
1645 struct btrace_frame_cache pattern;
1646 void **slot;
1647
1648 pattern.frame = frame;
1649
1650 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1651 if (slot == NULL)
1652 return NULL;
1653
19ba03f4 1654 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1655 return cache->bfun;
1656}
1657
cecac1ab
MM
1658/* Implement stop_reason method for record_btrace_frame_unwind. */
1659
1660static enum unwind_stop_reason
1661record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1662 void **this_cache)
1663{
0b722aec
MM
1664 const struct btrace_frame_cache *cache;
1665 const struct btrace_function *bfun;
1666
19ba03f4 1667 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1668 bfun = cache->bfun;
1669 gdb_assert (bfun != NULL);
1670
42bfe59e 1671 if (bfun->up == 0)
0b722aec
MM
1672 return UNWIND_UNAVAILABLE;
1673
1674 return UNWIND_NO_REASON;
cecac1ab
MM
1675}
1676
/* Implement this_id method for record_btrace_frame_unwind.

   The id uses the real frame function address as code address and the
   number of the first segment of this function instance as special
   address, so all segments of one instance share one frame id.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  /* The stack itself is not recorded, so the id is built without a stack
     address.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1706
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound: for a call, it is the instruction after the
   call site in the caller; for a tail-call/return link, the caller's first
   instruction.  All other registers are unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    /* The up link is a return: resume at the caller's first instruction.  */
    pc = caller->insn.front ().pc;
  else
    {
      /* The up link is a call: resume after the call instruction.  */
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1751
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the innermost frame when the thread is replaying (the segment at
   the replay position), and any outer frame whose callee frame was claimed
   by this unwinder via a regular (non-tail) call link.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the segment at the replay position.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: follow the callee's up link, unless it is a tail
	 call (handled by the tailcall sniffer below).  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1806
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims a frame only when the callee frame was claimed by the btrace
   unwinder and its up link marks a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  /* Tail-call frames are never innermost.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1848
1849static void
1850record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1851{
1852 struct btrace_frame_cache *cache;
1853 void **slot;
1854
19ba03f4 1855 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1856
1857 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1858 gdb_assert (slot != NULL);
1859
1860 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1861}
1862
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Like record_btrace_frame_unwind, but for artificial tail-call frames.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1890
/* Implement the get_unwinder method.  Returns the btrace unwinder for
   normal frames.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}
1898
/* Implement the get_tailcall_unwinder method.  Returns the btrace unwinder
   for artificial tail-call frames.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}
1906
/* Return a human-readable string for FLAG, for debug output.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}
1932
/* Indicate that TP should be resumed according to FLAG.  The flag is only
   recorded here; the actual move happens later in wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1954
/* Get the frame id of the current frame for TP.  Temporarily clears the
   thread's executing flag so the frame machinery accepts the request.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  int executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  return id;
}
1997
/* Start replaying a thread.  Returns the new replay iterator (owned by
   TP's btrace info), or NULL when there is no trace to replay.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Undo the partially-installed replay state before rethrowing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2075
2076/* Stop replaying a thread. */
2077
2078static void
2079record_btrace_stop_replaying (struct thread_info *tp)
2080{
2081 struct btrace_thread_info *btinfo;
2082
2083 btinfo = &tp->btrace;
2084
2085 xfree (btinfo->replay);
2086 btinfo->replay = NULL;
2087
2088 /* Make sure we're not leaving any stale registers. */
00431a78 2089 registers_changed_thread (tp);
52834460
MM
2090}
2091
e3cfc1c7
MM
2092/* Stop replaying TP if it is at the end of its execution history. */
2093
2094static void
2095record_btrace_stop_replaying_at_end (struct thread_info *tp)
2096{
2097 struct btrace_insn_iterator *replay, end;
2098 struct btrace_thread_info *btinfo;
2099
2100 btinfo = &tp->btrace;
2101 replay = btinfo->replay;
2102
2103 if (replay == NULL)
2104 return;
2105
2106 btrace_insn_end (&end, btinfo);
2107
2108 if (btrace_insn_cmp (replay, &end) == 0)
2109 record_btrace_stop_replaying (tp);
2110}
2111
f6ac5f3d 2112/* The resume method of target record-btrace. */
b2f4cfde 2113
f6ac5f3d
PA
2114void
2115record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2116{
d2939ba2 2117 enum btrace_thread_flag flag, cflag;
52834460 2118
a068643d 2119 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
f6ac5f3d 2120 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2121 step ? "step" : "cont");
52834460 2122
0ca912df
MM
2123 /* Store the execution direction of the last resume.
2124
f6ac5f3d 2125 If there is more than one resume call, we have to rely on infrun
0ca912df 2126 to not change the execution direction in-between. */
f6ac5f3d 2127 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2128
0ca912df 2129 /* As long as we're not replaying, just forward the request.
52834460 2130
0ca912df
MM
2131 For non-stop targets this means that no thread is replaying. In order to
2132 make progress, we may need to explicitly move replaying threads to the end
2133 of their execution history. */
f6ac5f3d
PA
2134 if ((::execution_direction != EXEC_REVERSE)
2135 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2136 {
b6a8c27b 2137 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2138 return;
b2f4cfde
MM
2139 }
2140
52834460 2141 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2142 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2143 {
2144 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2145 cflag = BTHR_RCONT;
2146 }
52834460 2147 else
d2939ba2
MM
2148 {
2149 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2150 cflag = BTHR_CONT;
2151 }
52834460 2152
52834460 2153 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2154 record_btrace_wait below.
2155
2156 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2157 if (!target_is_non_stop_p ())
2158 {
26a57c92 2159 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2160
08036331
PA
2161 for (thread_info *tp : all_non_exited_threads (ptid))
2162 {
2163 if (tp->ptid.matches (inferior_ptid))
2164 record_btrace_resume_thread (tp, flag);
2165 else
2166 record_btrace_resume_thread (tp, cflag);
2167 }
d2939ba2
MM
2168 }
2169 else
2170 {
08036331
PA
2171 for (thread_info *tp : all_non_exited_threads (ptid))
2172 record_btrace_resume_thread (tp, flag);
d2939ba2 2173 }
70ad5bff
MM
2174
2175 /* Async support. */
2176 if (target_can_async_p ())
2177 {
6a3753b3 2178 target_async (1);
70ad5bff
MM
2179 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2180 }
52834460
MM
2181}
2182
f6ac5f3d 2183/* The commit_resume method of target record-btrace. */
85ad3aaf 2184
f6ac5f3d
PA
2185void
2186record_btrace_target::commit_resume ()
85ad3aaf 2187{
f6ac5f3d
PA
2188 if ((::execution_direction != EXEC_REVERSE)
2189 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2190 beneath ()->commit_resume ();
85ad3aaf
PA
2191}
2192
987e68b1
MM
2193/* Cancel resuming TP. */
2194
2195static void
2196record_btrace_cancel_resume (struct thread_info *tp)
2197{
2198 enum btrace_thread_flag flags;
2199
2200 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2201 if (flags == 0)
2202 return;
2203
43792cf0
PA
2204 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2205 print_thread_id (tp),
a068643d 2206 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1
MM
2207 btrace_thread_flag_to_str (flags));
2208
2209 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2210 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2211}
2212
2213/* Return a target_waitstatus indicating that we ran out of history. */
2214
2215static struct target_waitstatus
2216btrace_step_no_history (void)
2217{
2218 struct target_waitstatus status;
2219
2220 status.kind = TARGET_WAITKIND_NO_HISTORY;
2221
2222 return status;
2223}
2224
2225/* Return a target_waitstatus indicating that a step finished. */
2226
2227static struct target_waitstatus
2228btrace_step_stopped (void)
2229{
2230 struct target_waitstatus status;
2231
2232 status.kind = TARGET_WAITKIND_STOPPED;
2233 status.value.sig = GDB_SIGNAL_TRAP;
2234
2235 return status;
2236}
2237
6e4879f0
MM
2238/* Return a target_waitstatus indicating that a thread was stopped as
2239 requested. */
2240
2241static struct target_waitstatus
2242btrace_step_stopped_on_request (void)
2243{
2244 struct target_waitstatus status;
2245
2246 status.kind = TARGET_WAITKIND_STOPPED;
2247 status.value.sig = GDB_SIGNAL_0;
2248
2249 return status;
2250}
2251
d825d248
MM
2252/* Return a target_waitstatus indicating a spurious stop. */
2253
2254static struct target_waitstatus
2255btrace_step_spurious (void)
2256{
2257 struct target_waitstatus status;
2258
2259 status.kind = TARGET_WAITKIND_SPURIOUS;
2260
2261 return status;
2262}
2263
e3cfc1c7
MM
2264/* Return a target_waitstatus indicating that the thread was not resumed. */
2265
2266static struct target_waitstatus
2267btrace_step_no_resumed (void)
2268{
2269 struct target_waitstatus status;
2270
2271 status.kind = TARGET_WAITKIND_NO_RESUMED;
2272
2273 return status;
2274}
2275
2276/* Return a target_waitstatus indicating that we should wait again. */
2277
2278static struct target_waitstatus
2279btrace_step_again (void)
2280{
2281 struct target_waitstatus status;
2282
2283 status.kind = TARGET_WAITKIND_IGNORE;
2284
2285 return status;
2286}
2287
52834460
MM
2288/* Clear the record histories. */
2289
2290static void
2291record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2292{
2293 xfree (btinfo->insn_history);
2294 xfree (btinfo->call_history);
2295
2296 btinfo->insn_history = NULL;
2297 btinfo->call_history = NULL;
2298}
2299
3c615f99
MM
2300/* Check whether TP's current replay position is at a breakpoint. */
2301
2302static int
2303record_btrace_replay_at_breakpoint (struct thread_info *tp)
2304{
2305 struct btrace_insn_iterator *replay;
2306 struct btrace_thread_info *btinfo;
2307 const struct btrace_insn *insn;
3c615f99
MM
2308
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2311
2312 if (replay == NULL)
2313 return 0;
2314
2315 insn = btrace_insn_get (replay);
2316 if (insn == NULL)
2317 return 0;
2318
00431a78 2319 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2320 &btinfo->stop_reason);
2321}
2322
d825d248 2323/* Step one instruction in forward direction. */
52834460
MM
2324
2325static struct target_waitstatus
d825d248 2326record_btrace_single_step_forward (struct thread_info *tp)
52834460 2327{
b61ce85c 2328 struct btrace_insn_iterator *replay, end, start;
52834460 2329 struct btrace_thread_info *btinfo;
52834460 2330
d825d248
MM
2331 btinfo = &tp->btrace;
2332 replay = btinfo->replay;
2333
2334 /* We're done if we're not replaying. */
2335 if (replay == NULL)
2336 return btrace_step_no_history ();
2337
011c71b6
MM
2338 /* Check if we're stepping a breakpoint. */
2339 if (record_btrace_replay_at_breakpoint (tp))
2340 return btrace_step_stopped ();
2341
b61ce85c
MM
2342 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2343 jump back to the instruction at which we started. */
2344 start = *replay;
d825d248
MM
2345 do
2346 {
2347 unsigned int steps;
2348
e3cfc1c7
MM
2349 /* We will bail out here if we continue stepping after reaching the end
2350 of the execution history. */
d825d248
MM
2351 steps = btrace_insn_next (replay, 1);
2352 if (steps == 0)
b61ce85c
MM
2353 {
2354 *replay = start;
2355 return btrace_step_no_history ();
2356 }
d825d248
MM
2357 }
2358 while (btrace_insn_get (replay) == NULL);
2359
2360 /* Determine the end of the instruction trace. */
2361 btrace_insn_end (&end, btinfo);
2362
e3cfc1c7
MM
2363 /* The execution trace contains (and ends with) the current instruction.
2364 This instruction has not been executed, yet, so the trace really ends
2365 one instruction earlier. */
d825d248 2366 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2367 return btrace_step_no_history ();
d825d248
MM
2368
2369 return btrace_step_spurious ();
2370}
2371
2372/* Step one instruction in backward direction. */
2373
2374static struct target_waitstatus
2375record_btrace_single_step_backward (struct thread_info *tp)
2376{
b61ce85c 2377 struct btrace_insn_iterator *replay, start;
d825d248 2378 struct btrace_thread_info *btinfo;
e59fa00f 2379
52834460
MM
2380 btinfo = &tp->btrace;
2381 replay = btinfo->replay;
2382
d825d248
MM
2383 /* Start replaying if we're not already doing so. */
2384 if (replay == NULL)
2385 replay = record_btrace_start_replaying (tp);
2386
2387 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2388 Skip gaps during replay. If we end up at a gap (at the beginning of
2389 the trace), jump back to the instruction at which we started. */
2390 start = *replay;
d825d248
MM
2391 do
2392 {
2393 unsigned int steps;
2394
2395 steps = btrace_insn_prev (replay, 1);
2396 if (steps == 0)
b61ce85c
MM
2397 {
2398 *replay = start;
2399 return btrace_step_no_history ();
2400 }
d825d248
MM
2401 }
2402 while (btrace_insn_get (replay) == NULL);
2403
011c71b6
MM
2404 /* Check if we're stepping a breakpoint.
2405
2406 For reverse-stepping, this check is after the step. There is logic in
2407 infrun.c that handles reverse-stepping separately. See, for example,
2408 proceed and adjust_pc_after_break.
2409
2410 This code assumes that for reverse-stepping, PC points to the last
2411 de-executed instruction, whereas for forward-stepping PC points to the
2412 next to-be-executed instruction. */
2413 if (record_btrace_replay_at_breakpoint (tp))
2414 return btrace_step_stopped ();
2415
d825d248
MM
2416 return btrace_step_spurious ();
2417}
2418
2419/* Step a single thread. */
2420
2421static struct target_waitstatus
2422record_btrace_step_thread (struct thread_info *tp)
2423{
2424 struct btrace_thread_info *btinfo;
2425 struct target_waitstatus status;
2426 enum btrace_thread_flag flags;
2427
2428 btinfo = &tp->btrace;
2429
6e4879f0
MM
2430 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2431 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2432
43792cf0 2433 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d 2434 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1 2435 btrace_thread_flag_to_str (flags));
52834460 2436
6e4879f0
MM
2437 /* We can't step without an execution history. */
2438 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2439 return btrace_step_no_history ();
2440
52834460
MM
2441 switch (flags)
2442 {
2443 default:
2444 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2445
6e4879f0
MM
2446 case BTHR_STOP:
2447 return btrace_step_stopped_on_request ();
2448
52834460 2449 case BTHR_STEP:
d825d248
MM
2450 status = record_btrace_single_step_forward (tp);
2451 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2452 break;
52834460
MM
2453
2454 return btrace_step_stopped ();
2455
2456 case BTHR_RSTEP:
d825d248
MM
2457 status = record_btrace_single_step_backward (tp);
2458 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2459 break;
52834460
MM
2460
2461 return btrace_step_stopped ();
2462
2463 case BTHR_CONT:
e3cfc1c7
MM
2464 status = record_btrace_single_step_forward (tp);
2465 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2466 break;
52834460 2467
e3cfc1c7
MM
2468 btinfo->flags |= flags;
2469 return btrace_step_again ();
52834460
MM
2470
2471 case BTHR_RCONT:
e3cfc1c7
MM
2472 status = record_btrace_single_step_backward (tp);
2473 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2474 break;
52834460 2475
e3cfc1c7
MM
2476 btinfo->flags |= flags;
2477 return btrace_step_again ();
2478 }
d825d248 2479
f6ac5f3d 2480 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2481 method will stop the thread for whom the event is reported. */
2482 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2483 btinfo->flags |= flags;
52834460 2484
e3cfc1c7 2485 return status;
b2f4cfde
MM
2486}
2487
a6b5be76
MM
2488/* Announce further events if necessary. */
2489
2490static void
53127008
SM
2491record_btrace_maybe_mark_async_event
2492 (const std::vector<thread_info *> &moving,
2493 const std::vector<thread_info *> &no_history)
a6b5be76 2494{
53127008
SM
2495 bool more_moving = !moving.empty ();
2496 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2497
2498 if (!more_moving && !more_no_history)
2499 return;
2500
2501 if (more_moving)
2502 DEBUG ("movers pending");
2503
2504 if (more_no_history)
2505 DEBUG ("no-history pending");
2506
2507 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2508}
2509
f6ac5f3d 2510/* The wait method of target record-btrace. */
b2f4cfde 2511
f6ac5f3d
PA
2512ptid_t
2513record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2514 int options)
b2f4cfde 2515{
53127008
SM
2516 std::vector<thread_info *> moving;
2517 std::vector<thread_info *> no_history;
52834460 2518
a068643d 2519 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
52834460 2520
b2f4cfde 2521 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2522 if ((::execution_direction != EXEC_REVERSE)
2523 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2524 {
b6a8c27b 2525 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2526 }
2527
e3cfc1c7 2528 /* Keep a work list of moving threads. */
08036331
PA
2529 for (thread_info *tp : all_non_exited_threads (ptid))
2530 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2531 moving.push_back (tp);
e3cfc1c7 2532
53127008 2533 if (moving.empty ())
52834460 2534 {
e3cfc1c7 2535 *status = btrace_step_no_resumed ();
52834460 2536
a068643d 2537 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
23fdd69e 2538 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2539
e3cfc1c7 2540 return null_ptid;
52834460
MM
2541 }
2542
e3cfc1c7
MM
2543 /* Step moving threads one by one, one step each, until either one thread
2544 reports an event or we run out of threads to step.
2545
2546 When stepping more than one thread, chances are that some threads reach
2547 the end of their execution history earlier than others. If we reported
2548 this immediately, all-stop on top of non-stop would stop all threads and
2549 resume the same threads next time. And we would report the same thread
2550 having reached the end of its execution history again.
2551
2552 In the worst case, this would starve the other threads. But even if other
2553 threads would be allowed to make progress, this would result in far too
2554 many intermediate stops.
2555
2556 We therefore delay the reporting of "no execution history" until we have
2557 nothing else to report. By this time, all threads should have moved to
2558 either the beginning or the end of their execution history. There will
2559 be a single user-visible stop. */
53127008
SM
2560 struct thread_info *eventing = NULL;
2561 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2562 {
53127008 2563 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2564 {
53127008
SM
2565 thread_info *tp = moving[ix];
2566
e3cfc1c7
MM
2567 *status = record_btrace_step_thread (tp);
2568
2569 switch (status->kind)
2570 {
2571 case TARGET_WAITKIND_IGNORE:
2572 ix++;
2573 break;
2574
2575 case TARGET_WAITKIND_NO_HISTORY:
53127008 2576 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2577 break;
2578
2579 default:
53127008 2580 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2581 break;
2582 }
2583 }
2584 }
2585
2586 if (eventing == NULL)
2587 {
2588 /* We started with at least one moving thread. This thread must have
2589 either stopped or reached the end of its execution history.
2590
2591 In the former case, EVENTING must not be NULL.
2592 In the latter case, NO_HISTORY must not be empty. */
53127008 2593 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2594
2595 /* We kept threads moving at the end of their execution history. Stop
2596 EVENTING now that we are going to report its stop. */
53127008 2597 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2598 eventing->btrace.flags &= ~BTHR_MOVE;
2599
2600 *status = btrace_step_no_history ();
2601 }
2602
2603 gdb_assert (eventing != NULL);
2604
2605 /* We kept threads replaying at the end of their execution history. Stop
2606 replaying EVENTING now that we are going to report its stop. */
2607 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2608
2609 /* Stop all other threads. */
5953356c 2610 if (!target_is_non_stop_p ())
53127008 2611 {
08036331 2612 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2613 record_btrace_cancel_resume (tp);
2614 }
52834460 2615
a6b5be76
MM
2616 /* In async mode, we need to announce further events. */
2617 if (target_is_async_p ())
2618 record_btrace_maybe_mark_async_event (moving, no_history);
2619
52834460 2620 /* Start record histories anew from the current position. */
e3cfc1c7 2621 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2622
2623 /* We moved the replay position but did not update registers. */
00431a78 2624 registers_changed_thread (eventing);
e3cfc1c7 2625
43792cf0
PA
2626 DEBUG ("wait ended by thread %s (%s): %s",
2627 print_thread_id (eventing),
a068643d 2628 target_pid_to_str (eventing->ptid).c_str (),
23fdd69e 2629 target_waitstatus_to_string (status).c_str ());
52834460 2630
e3cfc1c7 2631 return eventing->ptid;
52834460
MM
2632}
2633
f6ac5f3d 2634/* The stop method of target record-btrace. */
6e4879f0 2635
f6ac5f3d
PA
2636void
2637record_btrace_target::stop (ptid_t ptid)
6e4879f0 2638{
a068643d 2639 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2640
2641 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2642 if ((::execution_direction != EXEC_REVERSE)
2643 && !record_is_replaying (minus_one_ptid))
6e4879f0 2644 {
b6a8c27b 2645 this->beneath ()->stop (ptid);
6e4879f0
MM
2646 }
2647 else
2648 {
08036331
PA
2649 for (thread_info *tp : all_non_exited_threads (ptid))
2650 {
2651 tp->btrace.flags &= ~BTHR_MOVE;
2652 tp->btrace.flags |= BTHR_STOP;
2653 }
6e4879f0
MM
2654 }
2655 }
2656
f6ac5f3d 2657/* The can_execute_reverse method of target record-btrace. */
52834460 2658
57810aa7 2659bool
f6ac5f3d 2660record_btrace_target::can_execute_reverse ()
52834460 2661{
57810aa7 2662 return true;
52834460
MM
2663}
2664
f6ac5f3d 2665/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2666
57810aa7 2667bool
f6ac5f3d 2668record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2669{
f6ac5f3d 2670 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2671 {
2672 struct thread_info *tp = inferior_thread ();
2673
2674 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2675 }
2676
b6a8c27b 2677 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2678}
2679
f6ac5f3d 2680/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2681 record-btrace. */
2682
57810aa7 2683bool
f6ac5f3d 2684record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2685{
f6ac5f3d 2686 if (record_is_replaying (minus_one_ptid))
57810aa7 2687 return true;
9e8915c6 2688
b6a8c27b 2689 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2690}
2691
f6ac5f3d 2692/* The stopped_by_sw_breakpoint method of target record-btrace. */
9e8915c6 2693
57810aa7 2694bool
f6ac5f3d 2695record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2696{
f6ac5f3d 2697 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2698 {
2699 struct thread_info *tp = inferior_thread ();
2700
2701 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2702 }
2703
b6a8c27b 2704 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2705}
2706
f6ac5f3d 2707/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2708 record-btrace. */
2709
57810aa7 2710bool
f6ac5f3d 2711record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2712{
f6ac5f3d 2713 if (record_is_replaying (minus_one_ptid))
57810aa7 2714 return true;
52834460 2715
b6a8c27b 2716 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2717}
2718
f6ac5f3d 2719/* The update_thread_list method of target record-btrace. */
e2887aa3 2720
f6ac5f3d
PA
2721void
2722record_btrace_target::update_thread_list ()
e2887aa3 2723{
e8032dde 2724 /* We don't add or remove threads during replay. */
f6ac5f3d 2725 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2726 return;
2727
2728 /* Forward the request. */
b6a8c27b 2729 this->beneath ()->update_thread_list ();
e2887aa3
MM
2730}
2731
f6ac5f3d 2732/* The thread_alive method of target record-btrace. */
e2887aa3 2733
57810aa7 2734bool
f6ac5f3d 2735record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2736{
2737 /* We don't add or remove threads during replay. */
f6ac5f3d 2738 if (record_is_replaying (minus_one_ptid))
00431a78 2739 return true;
e2887aa3
MM
2740
2741 /* Forward the request. */
b6a8c27b 2742 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2743}
2744
066ce621
MM
2745/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2746 is stopped. */
2747
2748static void
2749record_btrace_set_replay (struct thread_info *tp,
2750 const struct btrace_insn_iterator *it)
2751{
2752 struct btrace_thread_info *btinfo;
2753
2754 btinfo = &tp->btrace;
2755
a0f1b963 2756 if (it == NULL)
52834460 2757 record_btrace_stop_replaying (tp);
066ce621
MM
2758 else
2759 {
2760 if (btinfo->replay == NULL)
52834460 2761 record_btrace_start_replaying (tp);
066ce621
MM
2762 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2763 return;
2764
2765 *btinfo->replay = *it;
00431a78 2766 registers_changed_thread (tp);
066ce621
MM
2767 }
2768
52834460
MM
2769 /* Start anew from the new replay position. */
2770 record_btrace_clear_histories (btinfo);
485668e5 2771
f2ffa92b
PA
2772 inferior_thread ()->suspend.stop_pc
2773 = regcache_read_pc (get_current_regcache ());
485668e5 2774 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2775}
2776
f6ac5f3d 2777/* The goto_record_begin method of target record-btrace. */
066ce621 2778
f6ac5f3d
PA
2779void
2780record_btrace_target::goto_record_begin ()
066ce621
MM
2781{
2782 struct thread_info *tp;
2783 struct btrace_insn_iterator begin;
2784
2785 tp = require_btrace_thread ();
2786
2787 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2788
2789 /* Skip gaps at the beginning of the trace. */
2790 while (btrace_insn_get (&begin) == NULL)
2791 {
2792 unsigned int steps;
2793
2794 steps = btrace_insn_next (&begin, 1);
2795 if (steps == 0)
2796 error (_("No trace."));
2797 }
2798
066ce621 2799 record_btrace_set_replay (tp, &begin);
066ce621
MM
2800}
2801
f6ac5f3d 2802/* The goto_record_end method of target record-btrace. */
066ce621 2803
f6ac5f3d
PA
2804void
2805record_btrace_target::goto_record_end ()
066ce621
MM
2806{
2807 struct thread_info *tp;
2808
2809 tp = require_btrace_thread ();
2810
2811 record_btrace_set_replay (tp, NULL);
066ce621
MM
2812}
2813
f6ac5f3d 2814/* The goto_record method of target record-btrace. */
066ce621 2815
f6ac5f3d
PA
2816void
2817record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2818{
2819 struct thread_info *tp;
2820 struct btrace_insn_iterator it;
2821 unsigned int number;
2822 int found;
2823
2824 number = insn;
2825
2826 /* Check for wrap-arounds. */
2827 if (number != insn)
2828 error (_("Instruction number out of range."));
2829
2830 tp = require_btrace_thread ();
2831
2832 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2833
2834 /* Check if the instruction could not be found or is a gap. */
2835 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2836 error (_("No such instruction."));
2837
2838 record_btrace_set_replay (tp, &it);
066ce621
MM
2839}
2840
f6ac5f3d 2841/* The record_stop_replaying method of target record-btrace. */
797094dd 2842
f6ac5f3d
PA
2843void
2844record_btrace_target::record_stop_replaying ()
797094dd 2845{
08036331 2846 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2847 record_btrace_stop_replaying (tp);
2848}
2849
f6ac5f3d 2850/* The execution_direction target method. */
70ad5bff 2851
f6ac5f3d
PA
2852enum exec_direction_kind
2853record_btrace_target::execution_direction ()
70ad5bff
MM
2854{
2855 return record_btrace_resume_exec_dir;
2856}
2857
f6ac5f3d 2858/* The prepare_to_generate_core target method. */
aef92902 2859
f6ac5f3d
PA
2860void
2861record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2862{
2863 record_btrace_generating_corefile = 1;
2864}
2865
f6ac5f3d 2866/* The done_generating_core target method. */
aef92902 2867
f6ac5f3d
PA
2868void
2869record_btrace_target::done_generating_core ()
aef92902
MM
2870{
2871 record_btrace_generating_corefile = 0;
2872}
2873
f4abbc16
MM
2874/* Start recording in BTS format. */
2875
2876static void
cdb34d4a 2877cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2878{
f4abbc16
MM
2879 if (args != NULL && *args != 0)
2880 error (_("Invalid argument."));
2881
2882 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2883
a70b8144 2884 try
492d29ea 2885 {
95a6b0a1 2886 execute_command ("target record-btrace", from_tty);
492d29ea 2887 }
230d2906 2888 catch (const gdb_exception &exception)
f4abbc16
MM
2889 {
2890 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2891 throw;
f4abbc16
MM
2892 }
2893}
2894
bc504a31 2895/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2896
2897static void
cdb34d4a 2898cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2899{
2900 if (args != NULL && *args != 0)
2901 error (_("Invalid argument."));
2902
b20a6524 2903 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2904
a70b8144 2905 try
492d29ea 2906 {
95a6b0a1 2907 execute_command ("target record-btrace", from_tty);
492d29ea 2908 }
230d2906 2909 catch (const gdb_exception &exception)
492d29ea
PA
2910 {
2911 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2912 throw;
492d29ea 2913 }
afedecd3
MM
2914}
2915
b20a6524
MM
2916/* Alias for "target record". */
2917
2918static void
981a3fb3 2919cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2920{
2921 if (args != NULL && *args != 0)
2922 error (_("Invalid argument."));
2923
2924 record_btrace_conf.format = BTRACE_FORMAT_PT;
2925
a70b8144 2926 try
b20a6524 2927 {
95a6b0a1 2928 execute_command ("target record-btrace", from_tty);
b20a6524 2929 }
230d2906 2930 catch (const gdb_exception &exception)
b20a6524
MM
2931 {
2932 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2933
a70b8144 2934 try
b20a6524 2935 {
95a6b0a1 2936 execute_command ("target record-btrace", from_tty);
b20a6524 2937 }
230d2906 2938 catch (const gdb_exception &ex)
b20a6524
MM
2939 {
2940 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2941 throw;
b20a6524 2942 }
b20a6524 2943 }
b20a6524
MM
2944}
2945
67b5c0c1
MM
2946/* The "set record btrace" command. */
2947
2948static void
981a3fb3 2949cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2950{
b85310e1
MM
2951 printf_unfiltered (_("\"set record btrace\" must be followed "
2952 "by an appropriate subcommand.\n"));
2953 help_list (set_record_btrace_cmdlist, "set record btrace ",
2954 all_commands, gdb_stdout);
67b5c0c1
MM
2955}
2956
2957/* The "show record btrace" command. */
2958
2959static void
981a3fb3 2960cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2961{
2962 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2963}
2964
2965/* The "show record btrace replay-memory-access" command. */
2966
2967static void
2968cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2969 struct cmd_list_element *c, const char *value)
2970{
2971 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2972 replay_memory_access);
2973}
2974
4a4495d6
MM
2975/* The "set record btrace cpu none" command. */
2976
2977static void
2978cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2979{
2980 if (args != nullptr && *args != 0)
2981 error (_("Trailing junk: '%s'."), args);
2982
2983 record_btrace_cpu_state = CS_NONE;
2984}
2985
2986/* The "set record btrace cpu auto" command. */
2987
2988static void
2989cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2990{
2991 if (args != nullptr && *args != 0)
2992 error (_("Trailing junk: '%s'."), args);
2993
2994 record_btrace_cpu_state = CS_AUTO;
2995}
2996
2997/* The "set record btrace cpu" command. */
2998
2999static void
3000cmd_set_record_btrace_cpu (const char *args, int from_tty)
3001{
3002 if (args == nullptr)
3003 args = "";
3004
3005 /* We use a hard-coded vendor string for now. */
3006 unsigned int family, model, stepping;
3007 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3008 &model, &l1, &stepping, &l2);
3009 if (matches == 3)
3010 {
3011 if (strlen (args) != l2)
3012 error (_("Trailing junk: '%s'."), args + l2);
3013 }
3014 else if (matches == 2)
3015 {
3016 if (strlen (args) != l1)
3017 error (_("Trailing junk: '%s'."), args + l1);
3018
3019 stepping = 0;
3020 }
3021 else
3022 error (_("Bad format. See \"help set record btrace cpu\"."));
3023
3024 if (USHRT_MAX < family)
3025 error (_("Cpu family too big."));
3026
3027 if (UCHAR_MAX < model)
3028 error (_("Cpu model too big."));
3029
3030 if (UCHAR_MAX < stepping)
3031 error (_("Cpu stepping too big."));
3032
3033 record_btrace_cpu.vendor = CV_INTEL;
3034 record_btrace_cpu.family = family;
3035 record_btrace_cpu.model = model;
3036 record_btrace_cpu.stepping = stepping;
3037
3038 record_btrace_cpu_state = CS_CPU;
3039}
3040
3041/* The "show record btrace cpu" command. */
3042
3043static void
3044cmd_show_record_btrace_cpu (const char *args, int from_tty)
3045{
4a4495d6
MM
3046 if (args != nullptr && *args != 0)
3047 error (_("Trailing junk: '%s'."), args);
3048
3049 switch (record_btrace_cpu_state)
3050 {
3051 case CS_AUTO:
3052 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3053 return;
3054
3055 case CS_NONE:
3056 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3057 return;
3058
3059 case CS_CPU:
3060 switch (record_btrace_cpu.vendor)
3061 {
3062 case CV_INTEL:
3063 if (record_btrace_cpu.stepping == 0)
3064 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3065 record_btrace_cpu.family,
3066 record_btrace_cpu.model);
3067 else
3068 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3069 record_btrace_cpu.family,
3070 record_btrace_cpu.model,
3071 record_btrace_cpu.stepping);
3072 return;
3073 }
3074 }
3075
3076 error (_("Internal error: bad cpu state."));
3077}
3078
3079/* The "s record btrace bts" command. */
d33501a5
MM
3080
3081static void
981a3fb3 3082cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3083{
3084 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3085 "by an appropriate subcommand.\n"));
d33501a5
MM
3086 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3087 all_commands, gdb_stdout);
3088}
3089
3090/* The "show record btrace bts" command. */
3091
static void
cmd_show_record_btrace_bts (const char *args, int from_tty)
{
  /* Print every setting registered under "show record btrace bts".  */
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
3097
b20a6524
MM
3098/* The "set record btrace pt" command. */
3099
3100static void
981a3fb3 3101cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3102{
3103 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3104 "by an appropriate subcommand.\n"));
3105 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3106 all_commands, gdb_stdout);
3107}
3108
3109/* The "show record btrace pt" command. */
3110
static void
cmd_show_record_btrace_pt (const char *args, int from_tty)
{
  /* Print every setting registered under "show record btrace pt".  */
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3116
3117/* The "record bts buffer-size" show value function. */
3118
static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  /* Echo the configured BTS buffer size (VALUE is pre-formatted by
     the set/show machinery).  */
  fprintf_filtered (file,
		    _("The record/replay bts buffer size is %s.\n"), value);
}
3127
3128/* The "record pt buffer-size" show value function. */
3129
static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  /* Echo the configured Intel PT buffer size (VALUE is pre-formatted
     by the set/show machinery).  */
  fprintf_filtered (file,
		    _("The record/replay pt buffer size is %s.\n"), value);
}
3138
afedecd3
MM
3139/* Initialize btrace commands. */
3140
void
_initialize_record_btrace (void)
{
  /* "record btrace" prefix plus its "b" alias.  Prefix commands are
     registered before the subcommands that hang off them.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "record btrace bts" (alias "record bts") - Branch Trace Store format.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "record btrace pt" (alias "record pt") - Intel Processor Trace format.  */
  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefixes for the option commands below.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options."), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options."), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set record btrace cpu" prefix, then its "auto"/"none" subcommands
     and the matching "show" command.  */
  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  "set record btrace cpu ", 1,
		  &set_record_btrace_cmdlist);

  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" option prefixes and the bts
     buffer-size option.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options."),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options."),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" option prefixes and the pt
     buffer-size option.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options."),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options."),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the record-btrace target itself.  */
  add_target (record_btrace_target_info, record_btrace_target_open);

  /* Cache used by the frame unwinder; keys/hash functions are defined
     elsewhere in this file.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default requested buffer sizes: 64 KiB for BTS, 16 KiB for PT.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}