/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2019 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program. If not, see <http://www.gnu.org/licenses/>. */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "gdbsupport/vec.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace. */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
                         struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
                         enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

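/* The singleton instance of the record-btrace target. */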
static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops. */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread. */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access. */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type. */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds. */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state. */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode. */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace". */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got. See record-full.c. */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution. */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file. */
static int record_btrace_generating_corefile;

/* The current branch trace configuration. */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace". */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts". */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt". */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu". */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message. Use do ... while (0) to avoid
   ambiguities when used in if statements. */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)


/* Return the cpu configured by the user. Returns NULL if the cpu was
   configured as auto. */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through. */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace. This function never
   returns NULL. */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace. This function never
   returns NULL. */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread. Warn on errors. */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads. */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
                                     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads. */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function. */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h. */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit. */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};

/* Open target record-btrace. */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled. */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace. */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace. */

void
record_btrace_target::disconnect (const char *args,
                                  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side. */
  unpush_target (this);

  /* Forward disconnect. */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace. */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target. */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not. */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace. */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human readable size suffix. */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

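  /* Scale only when the size is an exact multiple of the unit so that the
     printed value stays exact. */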
  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

/* Print a BTS configuration. */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration. */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration. */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace. */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record. */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp),
                     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error. */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines. */

struct btrace_line_range
{
  /* The symtab this line is from. */
  struct symtab *symtab;

  /* The first line (inclusive). */
  int begin;

  /* The last line (exclusive). */
  int end;
};

/* Construct a line range. */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range. */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry. */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise. */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise. */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC. */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST hold the emitters for the last printed
   source line and the instructions corresponding to that source line. When
   printing a new source line, the previously open emitters are closed and
   new ones are opened for the new source line. If the source line range in
   LINES is not empty, this function leaves the emitters for the last printed
   source line open so instructions can be added to them. */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
                    gdb::optional<ui_out_emit_list> *asm_list,
                    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace. */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end,
                     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
         btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace. */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration. */
          gdb_assert (conf != NULL);

          uiout->field_fmt ("insn-number", "%u",
                            btrace_insn_number (&it));
          uiout->text ("\t");

          btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
                                      flags);
                  last_lines = lines;
                }
              else if (!src_and_asm_tuple.has_value ())
                {
                  gdb_assert (!asm_list.has_value ());

                  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

                  /* No source information. */
                  asm_list.emplace (uiout, "line_asm_insn");
                }

              gdb_assert (src_and_asm_tuple.has_value ());
              gdb_assert (asm_list.has_value ());
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          disasm.pretty_print_insn (uiout, &dinsn, flags);
        }
    }
}

/* The insn_history method of target record-btrace. */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position. Otherwise, we
         start at the tail of the trace. */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction. Then we
         expand in the other direction, as well, to fill up any remaining
         context. */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well. */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_range method of target record-btrace. */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
                                          gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds. */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range. */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive. */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The insn_history_from method of target record-btrace. */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
                                         gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around. */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}

/* Print the instruction number range for a function call history line. */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion. */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line. */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (symbol_symtab (sym)),
                       ui_out_style_kind::FILE);

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function. */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace. */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index. */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace. */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration. */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text (" ");
        }

      if (sym != NULL)
        uiout->field_string ("function", SYMBOL_PRINT_NAME (sym),
                             ui_out_style_kind::FUNCTION);
      else if (msym != NULL)
        uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym),
                             ui_out_style_kind::FUNCTION);
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??",
                             ui_out_style_kind::FUNCTION);

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace. */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position. Otherwise, we
         start at the tail of the trace. */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction. Then we
         expand in the other direction, as well, to fill up any remaining
         context. */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well. */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace. */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
                                          record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds. */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range. */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive. */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace. */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
                                         record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around. */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace. */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace. */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  for (thread_info *tp : all_non_exited_threads (ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace. */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace. */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
                                    const char *annex, gdb_byte *readbuf,
                                    const gdb_byte *writebuf, ULONGEST offset,
                                    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay. */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general. */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory. */
            section = target_section_by_addr (this, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly. */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section. */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request. */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
                                         offset, len, xfered_len);
}

/* The insert_breakpoint method of target record-btrace. */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory. Allow it for the
     duration of this function. */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace. */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt,
                                         enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory. Allow it for the
     duration of this function. */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace. */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache->ptid ());
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register. */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}

/* The store_registers method of target record-btrace. */

void
record_btrace_target::store_registers (struct regcache *regcache, int regno)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  this->beneath ()->store_registers (regcache, regno);
}

/* The prepare_to_store method of target record-btrace. */

void
record_btrace_target::prepare_to_store (struct regcache *regcache)
{
  if (!record_btrace_generating_corefile
      && record_is_replaying (regcache->ptid ()))
    return;

  this->beneath ()->prepare_to_store (regcache);
}

/* The branch trace frame cache. */

struct btrace_frame_cache
{
  /* The thread. */
  struct thread_info *tp;

  /* The frame info. */
  struct frame_info *frame;

  /* The branch trace function segment. */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT. */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache. */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache. */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache. */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame. */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind. */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind. */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind. */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind. */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread. */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
        return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
        return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame. Initialize the frame cache. */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame. Initialize the frame cache. */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

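/* Implement the dealloc_cache method shared by record_btrace_frame_unwind
   and record_btrace_tailcall_frame_unwind. */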
static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

1864/* btrace recording does not store previous memory content, nor the stack
1865 frames' content. Any unwinding would return erroneous results as the stack
1866 contents no longer match the changed PC value restored from history.
1867 Therefore this unwinder reports any possibly unwound registers as
1868 <unavailable>. */
1869
0b722aec 1870const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1871{
1872 NORMAL_FRAME,
1873 record_btrace_frame_unwind_stop_reason,
1874 record_btrace_frame_this_id,
1875 record_btrace_frame_prev_register,
1876 NULL,
0b722aec
MM
1877 record_btrace_frame_sniffer,
1878 record_btrace_frame_dealloc_cache
1879};
1880
1881const struct frame_unwind record_btrace_tailcall_frame_unwind =
1882{
1883 TAILCALL_FRAME,
1884 record_btrace_frame_unwind_stop_reason,
1885 record_btrace_frame_this_id,
1886 record_btrace_frame_prev_register,
1887 NULL,
1888 record_btrace_tailcall_frame_sniffer,
1889 record_btrace_frame_dealloc_cache
cecac1ab 1890};
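/* Note: both unwinders above share the stop_reason, this_id, prev_register,
   and dealloc_cache callbacks; they differ only in the frame type
   (NORMAL_FRAME vs. TAILCALL_FRAME) and in the sniffer that claims a frame.  */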
b2f4cfde 1891
f6ac5f3d 1892/* Implement the get_unwinder method. */
ac01945b 1893
f6ac5f3d
PA
1894const struct frame_unwind *
1895record_btrace_target::get_unwinder ()
ac01945b
TT
1896{
1897 return &record_btrace_frame_unwind;
1898}
1899
f6ac5f3d 1900/* Implement the get_tailcall_unwinder method. */
ac01945b 1901
f6ac5f3d
PA
1902const struct frame_unwind *
1903record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1904{
1905 return &record_btrace_tailcall_frame_unwind;
1906}
1907
987e68b1
MM
1908/* Return a human-readable string for FLAG. */
1909
1910static const char *
1911btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1912{
1913 switch (flag)
1914 {
1915 case BTHR_STEP:
1916 return "step";
1917
1918 case BTHR_RSTEP:
1919 return "reverse-step";
1920
1921 case BTHR_CONT:
1922 return "cont";
1923
1924 case BTHR_RCONT:
1925 return "reverse-cont";
1926
1927 case BTHR_STOP:
1928 return "stop";
1929 }
1930
1931 return "<invalid>";
1932}
1933
52834460
MM
1934/* Indicate that TP should be resumed according to FLAG. */
1935
1936static void
1937record_btrace_resume_thread (struct thread_info *tp,
1938 enum btrace_thread_flag flag)
1939{
1940 struct btrace_thread_info *btinfo;
1941
43792cf0 1942 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d
TT
1943 target_pid_to_str (tp->ptid).c_str (), flag,
1944 btrace_thread_flag_to_str (flag));
52834460
MM
1945
1946 btinfo = &tp->btrace;
1947
52834460 1948 /* Fetch the latest branch trace. */
4a4495d6 1949 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1950
0ca912df
MM
1951 /* A resume request overwrites a preceding resume or stop request. */
1952 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1953 btinfo->flags |= flag;
1954}
1955
ec71cc2f
MM
1956/* Get the current frame for TP. */
1957
79b8d3b0
TT
1958static struct frame_id
1959get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1960{
79b8d3b0 1961 struct frame_id id;
ec71cc2f
MM
1962 int executing;
1963
00431a78
PA
1964 /* Set current thread, which is implicitly used by
1965 get_current_frame. */
1966 scoped_restore_current_thread restore_thread;
1967
1968 switch_to_thread (tp);
ec71cc2f
MM
1969
1970 /* Clear the executing flag to allow changes to the current frame.
1971 We are not actually running, yet. We just started a reverse execution
1972 command or a record goto command.
1973 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1974 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f
MM
1975 move the thread. Since we need to recompute the stack, we temporarily
1976 set EXECUTING to false. */
00431a78
PA
1977 executing = tp->executing;
1978 set_executing (inferior_ptid, false);
ec71cc2f 1979
79b8d3b0 1980 id = null_frame_id;
a70b8144 1981 try
ec71cc2f 1982 {
79b8d3b0 1983 id = get_frame_id (get_current_frame ());
ec71cc2f 1984 }
230d2906 1985 catch (const gdb_exception &except)
ec71cc2f
MM
1986 {
1987 /* Restore the previous execution state. */
1988 set_executing (inferior_ptid, executing);
1989
eedc3f4f 1990 throw;
ec71cc2f 1991 }
ec71cc2f
MM
1992
1993 /* Restore the previous execution state. */
1994 set_executing (inferior_ptid, executing);
1995
79b8d3b0 1996 return id;
ec71cc2f
MM
1997}
1998
52834460
MM
1999/* Start replaying a thread. */
2000
2001static struct btrace_insn_iterator *
2002record_btrace_start_replaying (struct thread_info *tp)
2003{
52834460
MM
2004 struct btrace_insn_iterator *replay;
2005 struct btrace_thread_info *btinfo;
52834460
MM
2006
2007 btinfo = &tp->btrace;
2008 replay = NULL;
2009
2010 /* We can't start replaying without trace. */
b54b03bd 2011 if (btinfo->functions.empty ())
52834460
MM
2012 return NULL;
2013
52834460
MM
2014 /* GDB stores the current frame_id when stepping in order to detect steps
2015 into subroutines.
2016 Since frames are computed differently when we're replaying, we need to
2017 recompute those stored frames and fix them up so we can still detect
2018 subroutines after we started replaying. */
a70b8144 2019 try
52834460 2020 {
52834460
MM
2021 struct frame_id frame_id;
2022 int upd_step_frame_id, upd_step_stack_frame_id;
2023
2024 /* The current frame without replaying - computed via normal unwind. */
79b8d3b0 2025 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2026
2027 /* Check if we need to update any stepping-related frame id's. */
2028 upd_step_frame_id = frame_id_eq (frame_id,
2029 tp->control.step_frame_id);
2030 upd_step_stack_frame_id = frame_id_eq (frame_id,
2031 tp->control.step_stack_frame_id);
2032
2033 /* We start replaying at the end of the branch trace. This corresponds
2034 to the current instruction. */
8d749320 2035 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2036 btrace_insn_end (replay, btinfo);
2037
31fd9caa
MM
2038 /* Skip gaps at the end of the trace. */
2039 while (btrace_insn_get (replay) == NULL)
2040 {
2041 unsigned int steps;
2042
2043 steps = btrace_insn_prev (replay, 1);
2044 if (steps == 0)
2045 error (_("No trace."));
2046 }
2047
52834460
MM
2048 /* We're not replaying, yet. */
2049 gdb_assert (btinfo->replay == NULL);
2050 btinfo->replay = replay;
2051
2052 /* Make sure we're not using any stale registers. */
00431a78 2053 registers_changed_thread (tp);
52834460
MM
2054
2055 /* The current frame with replaying - computed via btrace unwind. */
79b8d3b0 2056 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2057
2058 /* Replace stepping related frames where necessary. */
2059 if (upd_step_frame_id)
2060 tp->control.step_frame_id = frame_id;
2061 if (upd_step_stack_frame_id)
2062 tp->control.step_stack_frame_id = frame_id;
2063 }
230d2906 2064 catch (const gdb_exception &except)
52834460
MM
2065 {
2066 xfree (btinfo->replay);
2067 btinfo->replay = NULL;
2068
00431a78 2069 registers_changed_thread (tp);
52834460 2070
eedc3f4f 2071 throw;
52834460
MM
2072 }
2073
2074 return replay;
2075}
2076
2077/* Stop replaying a thread. */
2078
2079static void
2080record_btrace_stop_replaying (struct thread_info *tp)
2081{
2082 struct btrace_thread_info *btinfo;
2083
2084 btinfo = &tp->btrace;
2085
2086 xfree (btinfo->replay);
2087 btinfo->replay = NULL;
2088
2089 /* Make sure we're not leaving any stale registers. */
00431a78 2090 registers_changed_thread (tp);
52834460
MM
2091}
2092
e3cfc1c7
MM
2093/* Stop replaying TP if it is at the end of its execution history. */
2094
2095static void
2096record_btrace_stop_replaying_at_end (struct thread_info *tp)
2097{
2098 struct btrace_insn_iterator *replay, end;
2099 struct btrace_thread_info *btinfo;
2100
2101 btinfo = &tp->btrace;
2102 replay = btinfo->replay;
2103
2104 if (replay == NULL)
2105 return;
2106
2107 btrace_insn_end (&end, btinfo);
2108
2109 if (btrace_insn_cmp (replay, &end) == 0)
2110 record_btrace_stop_replaying (tp);
2111}
2112
f6ac5f3d 2113/* The resume method of target record-btrace. */
b2f4cfde 2114
f6ac5f3d
PA
2115void
2116record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2117{
d2939ba2 2118 enum btrace_thread_flag flag, cflag;
52834460 2119
a068643d 2120 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
f6ac5f3d 2121 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2122 step ? "step" : "cont");
52834460 2123
0ca912df
MM
2124 /* Store the execution direction of the last resume.
2125
f6ac5f3d 2126 If there is more than one resume call, we have to rely on infrun
0ca912df 2127 to not change the execution direction in-between. */
f6ac5f3d 2128 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2129
0ca912df 2130 /* As long as we're not replaying, just forward the request.
52834460 2131
0ca912df
MM
2132 For non-stop targets this means that no thread is replaying. In order to
2133 make progress, we may need to explicitly move replaying threads to the end
2134 of their execution history. */
f6ac5f3d
PA
2135 if ((::execution_direction != EXEC_REVERSE)
2136 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2137 {
b6a8c27b 2138 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2139 return;
b2f4cfde
MM
2140 }
2141
52834460 2142 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2143 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2144 {
2145 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2146 cflag = BTHR_RCONT;
2147 }
52834460 2148 else
d2939ba2
MM
2149 {
2150 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2151 cflag = BTHR_CONT;
2152 }
52834460 2153
52834460 2154 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2155 record_btrace_wait below.
2156
2157 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2158 if (!target_is_non_stop_p ())
2159 {
26a57c92 2160 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2161
08036331
PA
2162 for (thread_info *tp : all_non_exited_threads (ptid))
2163 {
2164 if (tp->ptid.matches (inferior_ptid))
2165 record_btrace_resume_thread (tp, flag);
2166 else
2167 record_btrace_resume_thread (tp, cflag);
2168 }
d2939ba2
MM
2169 }
2170 else
2171 {
08036331
PA
2172 for (thread_info *tp : all_non_exited_threads (ptid))
2173 record_btrace_resume_thread (tp, flag);
d2939ba2 2174 }
70ad5bff
MM
2175
2176 /* Async support. */
2177 if (target_can_async_p ())
2178 {
6a3753b3 2179 target_async (1);
70ad5bff
MM
2180 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2181 }
52834460
MM
2182}
2183
f6ac5f3d 2184/* The commit_resume method of target record-btrace. */
85ad3aaf 2185
f6ac5f3d
PA
2186void
2187record_btrace_target::commit_resume ()
85ad3aaf 2188{
f6ac5f3d
PA
2189 if ((::execution_direction != EXEC_REVERSE)
2190 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2191 beneath ()->commit_resume ();
85ad3aaf
PA
2192}
2193
987e68b1
MM
2194/* Cancel resuming TP. */
2195
2196static void
2197record_btrace_cancel_resume (struct thread_info *tp)
2198{
2199 enum btrace_thread_flag flags;
2200
2201 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2202 if (flags == 0)
2203 return;
2204
43792cf0
PA
2205 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2206 print_thread_id (tp),
a068643d 2207 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1
MM
2208 btrace_thread_flag_to_str (flags));
2209
2210 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2211 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2212}
2213
2214/* Return a target_waitstatus indicating that we ran out of history. */
2215
2216static struct target_waitstatus
2217btrace_step_no_history (void)
2218{
2219 struct target_waitstatus status;
2220
2221 status.kind = TARGET_WAITKIND_NO_HISTORY;
2222
2223 return status;
2224}
2225
2226/* Return a target_waitstatus indicating that a step finished. */
2227
2228static struct target_waitstatus
2229btrace_step_stopped (void)
2230{
2231 struct target_waitstatus status;
2232
2233 status.kind = TARGET_WAITKIND_STOPPED;
2234 status.value.sig = GDB_SIGNAL_TRAP;
2235
2236 return status;
2237}
2238
6e4879f0
MM
2239/* Return a target_waitstatus indicating that a thread was stopped as
2240 requested. */
2241
2242static struct target_waitstatus
2243btrace_step_stopped_on_request (void)
2244{
2245 struct target_waitstatus status;
2246
2247 status.kind = TARGET_WAITKIND_STOPPED;
2248 status.value.sig = GDB_SIGNAL_0;
2249
2250 return status;
2251}
2252
d825d248
MM
2253/* Return a target_waitstatus indicating a spurious stop. */
2254
2255static struct target_waitstatus
2256btrace_step_spurious (void)
2257{
2258 struct target_waitstatus status;
2259
2260 status.kind = TARGET_WAITKIND_SPURIOUS;
2261
2262 return status;
2263}
2264
e3cfc1c7
MM
2265/* Return a target_waitstatus indicating that the thread was not resumed. */
2266
2267static struct target_waitstatus
2268btrace_step_no_resumed (void)
2269{
2270 struct target_waitstatus status;
2271
2272 status.kind = TARGET_WAITKIND_NO_RESUMED;
2273
2274 return status;
2275}
2276
2277/* Return a target_waitstatus indicating that we should wait again. */
2278
2279static struct target_waitstatus
2280btrace_step_again (void)
2281{
2282 struct target_waitstatus status;
2283
2284 status.kind = TARGET_WAITKIND_IGNORE;
2285
2286 return status;
2287}
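/* The constructors above build the target_waitstatus values that the
   single-step helpers and record_btrace_target::wait below report for the
   various stepping outcomes.  */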
2288
52834460
MM
2289/* Clear the record histories. */
2290
2291static void
2292record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2293{
2294 xfree (btinfo->insn_history);
2295 xfree (btinfo->call_history);
2296
2297 btinfo->insn_history = NULL;
2298 btinfo->call_history = NULL;
2299}
2300
3c615f99
MM
2301/* Check whether TP's current replay position is at a breakpoint. */
2302
2303static int
2304record_btrace_replay_at_breakpoint (struct thread_info *tp)
2305{
2306 struct btrace_insn_iterator *replay;
2307 struct btrace_thread_info *btinfo;
2308 const struct btrace_insn *insn;
3c615f99
MM
2309
2310 btinfo = &tp->btrace;
2311 replay = btinfo->replay;
2312
2313 if (replay == NULL)
2314 return 0;
2315
2316 insn = btrace_insn_get (replay);
2317 if (insn == NULL)
2318 return 0;
2319
00431a78 2320 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2321 &btinfo->stop_reason);
2322}
2323
d825d248 2324/* Step one instruction in forward direction. */
52834460
MM
2325
2326static struct target_waitstatus
d825d248 2327record_btrace_single_step_forward (struct thread_info *tp)
52834460 2328{
b61ce85c 2329 struct btrace_insn_iterator *replay, end, start;
52834460 2330 struct btrace_thread_info *btinfo;
52834460 2331
d825d248
MM
2332 btinfo = &tp->btrace;
2333 replay = btinfo->replay;
2334
2335 /* We're done if we're not replaying. */
2336 if (replay == NULL)
2337 return btrace_step_no_history ();
2338
011c71b6
MM
2339 /* Check if we're stepping a breakpoint. */
2340 if (record_btrace_replay_at_breakpoint (tp))
2341 return btrace_step_stopped ();
2342
b61ce85c
MM
2343 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2344 jump back to the instruction at which we started. */
2345 start = *replay;
d825d248
MM
2346 do
2347 {
2348 unsigned int steps;
2349
e3cfc1c7
MM
2350 /* We will bail out here if we continue stepping after reaching the end
2351 of the execution history. */
d825d248
MM
2352 steps = btrace_insn_next (replay, 1);
2353 if (steps == 0)
b61ce85c
MM
2354 {
2355 *replay = start;
2356 return btrace_step_no_history ();
2357 }
d825d248
MM
2358 }
2359 while (btrace_insn_get (replay) == NULL);
2360
2361 /* Determine the end of the instruction trace. */
2362 btrace_insn_end (&end, btinfo);
2363
e3cfc1c7
MM
2364 /* The execution trace contains (and ends with) the current instruction.
2365 This instruction has not been executed, yet, so the trace really ends
2366 one instruction earlier. */
d825d248 2367 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2368 return btrace_step_no_history ();
d825d248
MM
2369
2370 return btrace_step_spurious ();
2371}
2372
2373/* Step one instruction in backward direction. */
2374
2375static struct target_waitstatus
2376record_btrace_single_step_backward (struct thread_info *tp)
2377{
b61ce85c 2378 struct btrace_insn_iterator *replay, start;
d825d248 2379 struct btrace_thread_info *btinfo;
e59fa00f 2380
52834460
MM
2381 btinfo = &tp->btrace;
2382 replay = btinfo->replay;
2383
d825d248
MM
2384 /* Start replaying if we're not already doing so. */
2385 if (replay == NULL)
2386 replay = record_btrace_start_replaying (tp);
2387
2388 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2389 Skip gaps during replay. If we end up at a gap (at the beginning of
2390 the trace), jump back to the instruction at which we started. */
2391 start = *replay;
d825d248
MM
2392 do
2393 {
2394 unsigned int steps;
2395
2396 steps = btrace_insn_prev (replay, 1);
2397 if (steps == 0)
b61ce85c
MM
2398 {
2399 *replay = start;
2400 return btrace_step_no_history ();
2401 }
d825d248
MM
2402 }
2403 while (btrace_insn_get (replay) == NULL);
2404
011c71b6
MM
2405 /* Check if we're stepping a breakpoint.
2406
2407 For reverse-stepping, this check is after the step. There is logic in
2408 infrun.c that handles reverse-stepping separately. See, for example,
2409 proceed and adjust_pc_after_break.
2410
2411 This code assumes that for reverse-stepping, PC points to the last
2412 de-executed instruction, whereas for forward-stepping PC points to the
2413 next to-be-executed instruction. */
2414 if (record_btrace_replay_at_breakpoint (tp))
2415 return btrace_step_stopped ();
2416
d825d248
MM
2417 return btrace_step_spurious ();
2418}
2419
2420/* Step a single thread. */
2421
2422static struct target_waitstatus
2423record_btrace_step_thread (struct thread_info *tp)
2424{
2425 struct btrace_thread_info *btinfo;
2426 struct target_waitstatus status;
2427 enum btrace_thread_flag flags;
2428
2429 btinfo = &tp->btrace;
2430
6e4879f0
MM
2431 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2432 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2433
43792cf0 2434 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d 2435 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1 2436 btrace_thread_flag_to_str (flags));
52834460 2437
6e4879f0
MM
2438 /* We can't step without an execution history. */
2439 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2440 return btrace_step_no_history ();
2441
52834460
MM
2442 switch (flags)
2443 {
2444 default:
2445 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2446
6e4879f0
MM
2447 case BTHR_STOP:
2448 return btrace_step_stopped_on_request ();
2449
52834460 2450 case BTHR_STEP:
d825d248
MM
2451 status = record_btrace_single_step_forward (tp);
2452 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2453 break;
52834460
MM
2454
2455 return btrace_step_stopped ();
2456
2457 case BTHR_RSTEP:
d825d248
MM
2458 status = record_btrace_single_step_backward (tp);
2459 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2460 break;
52834460
MM
2461
2462 return btrace_step_stopped ();
2463
2464 case BTHR_CONT:
e3cfc1c7
MM
2465 status = record_btrace_single_step_forward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2467 break;
52834460 2468
e3cfc1c7
MM
2469 btinfo->flags |= flags;
2470 return btrace_step_again ();
52834460
MM
2471
2472 case BTHR_RCONT:
e3cfc1c7
MM
2473 status = record_btrace_single_step_backward (tp);
2474 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2475 break;
52834460 2476
e3cfc1c7
MM
2477 btinfo->flags |= flags;
2478 return btrace_step_again ();
2479 }
d825d248 2480
f6ac5f3d 2481 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2482 method will stop the thread for whom the event is reported. */
2483 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2484 btinfo->flags |= flags;
52834460 2485
e3cfc1c7 2486 return status;
b2f4cfde
MM
2487}
2488
a6b5be76
MM
2489/* Announce further events if necessary. */
2490
2491static void
53127008
SM
2492record_btrace_maybe_mark_async_event
2493 (const std::vector<thread_info *> &moving,
2494 const std::vector<thread_info *> &no_history)
a6b5be76 2495{
53127008
SM
2496 bool more_moving = !moving.empty ();
2497 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2498
2499 if (!more_moving && !more_no_history)
2500 return;
2501
2502 if (more_moving)
2503 DEBUG ("movers pending");
2504
2505 if (more_no_history)
2506 DEBUG ("no-history pending");
2507
2508 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2509}
2510
f6ac5f3d 2511/* The wait method of target record-btrace. */
b2f4cfde 2512
f6ac5f3d
PA
2513ptid_t
2514record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2515 int options)
b2f4cfde 2516{
53127008
SM
2517 std::vector<thread_info *> moving;
2518 std::vector<thread_info *> no_history;
52834460 2519
a068643d 2520 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
52834460 2521
b2f4cfde 2522 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2523 if ((::execution_direction != EXEC_REVERSE)
2524 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2525 {
b6a8c27b 2526 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2527 }
2528
e3cfc1c7 2529 /* Keep a work list of moving threads. */
08036331
PA
2530 for (thread_info *tp : all_non_exited_threads (ptid))
2531 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2532 moving.push_back (tp);
e3cfc1c7 2533
53127008 2534 if (moving.empty ())
52834460 2535 {
e3cfc1c7 2536 *status = btrace_step_no_resumed ();
52834460 2537
a068643d 2538 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
23fdd69e 2539 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2540
e3cfc1c7 2541 return null_ptid;
52834460
MM
2542 }
2543
e3cfc1c7
MM
2544 /* Step moving threads one by one, one step each, until either one thread
2545 reports an event or we run out of threads to step.
2546
2547 When stepping more than one thread, chances are that some threads reach
2548 the end of their execution history earlier than others. If we reported
2549 this immediately, all-stop on top of non-stop would stop all threads and
2550 resume the same threads next time. And we would report the same thread
2551 having reached the end of its execution history again.
2552
2553 In the worst case, this would starve the other threads. But even if other
2554 threads would be allowed to make progress, this would result in far too
2555 many intermediate stops.
2556
2557 We therefore delay the reporting of "no execution history" until we have
2558 nothing else to report. By this time, all threads should have moved to
2559 either the beginning or the end of their execution history. There will
2560 be a single user-visible stop. */
53127008
SM
2561 struct thread_info *eventing = NULL;
2562 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2563 {
53127008 2564 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2565 {
53127008
SM
2566 thread_info *tp = moving[ix];
2567
e3cfc1c7
MM
2568 *status = record_btrace_step_thread (tp);
2569
2570 switch (status->kind)
2571 {
2572 case TARGET_WAITKIND_IGNORE:
2573 ix++;
2574 break;
2575
2576 case TARGET_WAITKIND_NO_HISTORY:
53127008 2577 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2578 break;
2579
2580 default:
53127008 2581 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2582 break;
2583 }
2584 }
2585 }
2586
2587 if (eventing == NULL)
2588 {
2589 /* We started with at least one moving thread. This thread must have
2590 either stopped or reached the end of its execution history.
2591
2592 In the former case, EVENTING must not be NULL.
2593 In the latter case, NO_HISTORY must not be empty. */
53127008 2594 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2595
2596 /* We kept threads moving at the end of their execution history. Stop
2597 EVENTING now that we are going to report its stop. */
53127008 2598 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2599 eventing->btrace.flags &= ~BTHR_MOVE;
2600
2601 *status = btrace_step_no_history ();
2602 }
2603
2604 gdb_assert (eventing != NULL);
2605
2606 /* We kept threads replaying at the end of their execution history. Stop
2607 replaying EVENTING now that we are going to report its stop. */
2608 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2609
2610 /* Stop all other threads. */
5953356c 2611 if (!target_is_non_stop_p ())
53127008 2612 {
08036331 2613 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2614 record_btrace_cancel_resume (tp);
2615 }
52834460 2616
a6b5be76
MM
2617 /* In async mode, we need to announce further events. */
2618 if (target_is_async_p ())
2619 record_btrace_maybe_mark_async_event (moving, no_history);
2620
52834460 2621 /* Start record histories anew from the current position. */
e3cfc1c7 2622 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2623
2624 /* We moved the replay position but did not update registers. */
00431a78 2625 registers_changed_thread (eventing);
e3cfc1c7 2626
43792cf0
PA
2627 DEBUG ("wait ended by thread %s (%s): %s",
2628 print_thread_id (eventing),
a068643d 2629 target_pid_to_str (eventing->ptid).c_str (),
23fdd69e 2630 target_waitstatus_to_string (status).c_str ());
52834460 2631
e3cfc1c7 2632 return eventing->ptid;
52834460
MM
2633}
2634
f6ac5f3d 2635/* The stop method of target record-btrace. */
6e4879f0 2636
f6ac5f3d
PA
2637void
2638record_btrace_target::stop (ptid_t ptid)
6e4879f0 2639{
a068643d 2640 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2641
2642 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2643 if ((::execution_direction != EXEC_REVERSE)
2644 && !record_is_replaying (minus_one_ptid))
6e4879f0 2645 {
b6a8c27b 2646 this->beneath ()->stop (ptid);
6e4879f0
MM
2647 }
2648 else
2649 {
08036331
PA
2650 for (thread_info *tp : all_non_exited_threads (ptid))
2651 {
2652 tp->btrace.flags &= ~BTHR_MOVE;
2653 tp->btrace.flags |= BTHR_STOP;
2654 }
6e4879f0
MM
2655 }
2656 }
2657
f6ac5f3d 2658/* The can_execute_reverse method of target record-btrace. */
52834460 2659
57810aa7 2660bool
f6ac5f3d 2661record_btrace_target::can_execute_reverse ()
52834460 2662{
57810aa7 2663 return true;
52834460
MM
2664}
2665
f6ac5f3d 2666/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2667
57810aa7 2668bool
f6ac5f3d 2669record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2670{
f6ac5f3d 2671 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2672 {
2673 struct thread_info *tp = inferior_thread ();
2674
2675 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2676 }
2677
b6a8c27b 2678 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2679}
2680
f6ac5f3d 2681/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2682 record-btrace. */
2683
57810aa7 2684bool
f6ac5f3d 2685record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2686{
f6ac5f3d 2687 if (record_is_replaying (minus_one_ptid))
57810aa7 2688 return true;
9e8915c6 2689
b6a8c27b 2690 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2691}
2692
f6ac5f3d 2693/* The stopped_by_hw_breakpoint method of target record-btrace. */
9e8915c6 2694
57810aa7 2695bool
f6ac5f3d 2696record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2697{
f6ac5f3d 2698 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2699 {
2700 struct thread_info *tp = inferior_thread ();
2701
2702 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2703 }
2704
b6a8c27b 2705 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2706}
2707
f6ac5f3d 2708/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2709 record-btrace. */
2710
57810aa7 2711bool
f6ac5f3d 2712record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2713{
f6ac5f3d 2714 if (record_is_replaying (minus_one_ptid))
57810aa7 2715 return true;
52834460 2716
b6a8c27b 2717 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2718}
2719
f6ac5f3d 2720/* The update_thread_list method of target record-btrace. */
e2887aa3 2721
f6ac5f3d
PA
2722void
2723record_btrace_target::update_thread_list ()
e2887aa3 2724{
e8032dde 2725 /* We don't add or remove threads during replay. */
f6ac5f3d 2726 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2727 return;
2728
2729 /* Forward the request. */
b6a8c27b 2730 this->beneath ()->update_thread_list ();
e2887aa3
MM
2731}
2732
f6ac5f3d 2733/* The thread_alive method of target record-btrace. */
e2887aa3 2734
57810aa7 2735bool
f6ac5f3d 2736record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2737{
2738 /* We don't add or remove threads during replay. */
f6ac5f3d 2739 if (record_is_replaying (minus_one_ptid))
00431a78 2740 return true;
e2887aa3
MM
2741
2742 /* Forward the request. */
b6a8c27b 2743 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2744}
2745
066ce621
MM
2746/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2747 is stopped. */
2748
2749static void
2750record_btrace_set_replay (struct thread_info *tp,
2751 const struct btrace_insn_iterator *it)
2752{
2753 struct btrace_thread_info *btinfo;
2754
2755 btinfo = &tp->btrace;
2756
a0f1b963 2757 if (it == NULL)
52834460 2758 record_btrace_stop_replaying (tp);
066ce621
MM
2759 else
2760 {
2761 if (btinfo->replay == NULL)
52834460 2762 record_btrace_start_replaying (tp);
066ce621
MM
2763 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2764 return;
2765
2766 *btinfo->replay = *it;
00431a78 2767 registers_changed_thread (tp);
066ce621
MM
2768 }
2769
52834460
MM
2770 /* Start anew from the new replay position. */
2771 record_btrace_clear_histories (btinfo);
485668e5 2772
f2ffa92b
PA
2773 inferior_thread ()->suspend.stop_pc
2774 = regcache_read_pc (get_current_regcache ());
485668e5 2775 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2776}
2777
f6ac5f3d 2778/* The goto_record_begin method of target record-btrace. */
066ce621 2779
f6ac5f3d
PA
2780void
2781record_btrace_target::goto_record_begin ()
066ce621
MM
2782{
2783 struct thread_info *tp;
2784 struct btrace_insn_iterator begin;
2785
2786 tp = require_btrace_thread ();
2787
2788 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2789
2790 /* Skip gaps at the beginning of the trace. */
2791 while (btrace_insn_get (&begin) == NULL)
2792 {
2793 unsigned int steps;
2794
2795 steps = btrace_insn_next (&begin, 1);
2796 if (steps == 0)
2797 error (_("No trace."));
2798 }
2799
066ce621 2800 record_btrace_set_replay (tp, &begin);
066ce621
MM
2801}
2802
f6ac5f3d 2803/* The goto_record_end method of target record-btrace. */
066ce621 2804
f6ac5f3d
PA
2805void
2806record_btrace_target::goto_record_end ()
066ce621
MM
2807{
2808 struct thread_info *tp;
2809
2810 tp = require_btrace_thread ();
2811
2812 record_btrace_set_replay (tp, NULL);
066ce621
MM
2813}
2814
f6ac5f3d 2815/* The goto_record method of target record-btrace. */
066ce621 2816
f6ac5f3d
PA
2817void
2818record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2819{
2820 struct thread_info *tp;
2821 struct btrace_insn_iterator it;
2822 unsigned int number;
2823 int found;
2824
2825 number = insn;
2826
2827 /* Check for wrap-arounds. */
2828 if (number != insn)
2829 error (_("Instruction number out of range."));
2830
2831 tp = require_btrace_thread ();
2832
2833 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2834
2835 /* Check if the instruction could not be found or is a gap. */
2836 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2837 error (_("No such instruction."));
2838
2839 record_btrace_set_replay (tp, &it);
066ce621
MM
2840}
2841
f6ac5f3d 2842/* The record_stop_replaying method of target record-btrace. */
797094dd 2843
f6ac5f3d
PA
2844void
2845record_btrace_target::record_stop_replaying ()
797094dd 2846{
08036331 2847 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2848 record_btrace_stop_replaying (tp);
2849}
2850
f6ac5f3d 2851/* The execution_direction target method. */
70ad5bff 2852
f6ac5f3d
PA
2853enum exec_direction_kind
2854record_btrace_target::execution_direction ()
70ad5bff
MM
2855{
2856 return record_btrace_resume_exec_dir;
2857}
2858
f6ac5f3d 2859/* The prepare_to_generate_core target method. */
aef92902 2860
f6ac5f3d
PA
2861void
2862record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2863{
2864 record_btrace_generating_corefile = 1;
2865}
2866
f6ac5f3d 2867/* The done_generating_core target method. */
aef92902 2868
f6ac5f3d
PA
2869void
2870record_btrace_target::done_generating_core ()
aef92902
MM
2871{
2872 record_btrace_generating_corefile = 0;
2873}
2874
f4abbc16
MM
2875/* Start recording in BTS format. */
2876
2877static void
cdb34d4a 2878cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2879{
f4abbc16
MM
2880 if (args != NULL && *args != 0)
2881 error (_("Invalid argument."));
2882
2883 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2884
a70b8144 2885 try
492d29ea 2886 {
95a6b0a1 2887 execute_command ("target record-btrace", from_tty);
492d29ea 2888 }
230d2906 2889 catch (const gdb_exception &exception)
f4abbc16
MM
2890 {
2891 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2892 throw;
f4abbc16
MM
2893 }
2894}
2895
bc504a31 2896/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2897
2898static void
cdb34d4a 2899cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2900{
2901 if (args != NULL && *args != 0)
2902 error (_("Invalid argument."));
2903
b20a6524 2904 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2905
a70b8144 2906 try
492d29ea 2907 {
95a6b0a1 2908 execute_command ("target record-btrace", from_tty);
492d29ea 2909 }
230d2906 2910 catch (const gdb_exception &exception)
492d29ea
PA
2911 {
2912 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2913 throw;
492d29ea 2914 }
afedecd3
MM
2915}
2916
b20a6524
MM
2917/* Alias for "target record": try Intel PT format first, fall back to BTS. */
2918
2919static void
981a3fb3 2920cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2921{
2922 if (args != NULL && *args != 0)
2923 error (_("Invalid argument."));
2924
2925 record_btrace_conf.format = BTRACE_FORMAT_PT;
2926
a70b8144 2927 try
b20a6524 2928 {
95a6b0a1 2929 execute_command ("target record-btrace", from_tty);
b20a6524 2930 }
230d2906 2931 catch (const gdb_exception &exception)
b20a6524
MM
2932 {
2933 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2934
a70b8144 2935 try
b20a6524 2936 {
95a6b0a1 2937 execute_command ("target record-btrace", from_tty);
b20a6524 2938 }
230d2906 2939 catch (const gdb_exception &ex)
b20a6524
MM
2940 {
2941 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2942 throw;
b20a6524 2943 }
b20a6524 2944 }
b20a6524
MM
2945}
2946
67b5c0c1
MM
2947/* The "set record btrace" command. */
2948
2949static void
981a3fb3 2950cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 2951{
b85310e1
MM
2952 printf_unfiltered (_("\"set record btrace\" must be followed "
2953 "by an appropriate subcommand.\n"));
2954 help_list (set_record_btrace_cmdlist, "set record btrace ",
2955 all_commands, gdb_stdout);
67b5c0c1
MM
2956}
2957
2958/* The "show record btrace" command. */
2959
2960static void
981a3fb3 2961cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2962{
2963 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2964}
2965
2966/* The "show record btrace replay-memory-access" command. */
2967
2968static void
2969cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2970 struct cmd_list_element *c, const char *value)
2971{
2972 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2973 replay_memory_access);
2974}
2975
4a4495d6
MM
2976/* The "set record btrace cpu none" command. */
2977
2978static void
2979cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2980{
2981 if (args != nullptr && *args != 0)
2982 error (_("Trailing junk: '%s'."), args);
2983
2984 record_btrace_cpu_state = CS_NONE;
2985}
2986
2987/* The "set record btrace cpu auto" command. */
2988
2989static void
2990cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2991{
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2994
2995 record_btrace_cpu_state = CS_AUTO;
2996}
2997
2998/* The "set record btrace cpu" command. */
2999
3000static void
3001cmd_set_record_btrace_cpu (const char *args, int from_tty)
3002{
3003 if (args == nullptr)
3004 args = "";
3005
3006 /* We use a hard-coded vendor string for now. */
3007 unsigned int family, model, stepping;
3008 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3009 &model, &l1, &stepping, &l2);
3010 if (matches == 3)
3011 {
3012 if (strlen (args) != l2)
3013 error (_("Trailing junk: '%s'."), args + l2);
3014 }
3015 else if (matches == 2)
3016 {
3017 if (strlen (args) != l1)
3018 error (_("Trailing junk: '%s'."), args + l1);
3019
3020 stepping = 0;
3021 }
3022 else
3023 error (_("Bad format. See \"help set record btrace cpu\"."));
3024
3025 if (USHRT_MAX < family)
3026 error (_("Cpu family too big."));
3027
3028 if (UCHAR_MAX < model)
3029 error (_("Cpu model too big."));
3030
3031 if (UCHAR_MAX < stepping)
3032 error (_("Cpu stepping too big."));
3033
3034 record_btrace_cpu.vendor = CV_INTEL;
3035 record_btrace_cpu.family = family;
3036 record_btrace_cpu.model = model;
3037 record_btrace_cpu.stepping = stepping;
3038
3039 record_btrace_cpu_state = CS_CPU;
3040}
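/* Illustrative examples (values assumed, not from the original source):
   with the format string above, commands such as
     (gdb) set record btrace cpu intel: 6/158
     (gdb) set record btrace cpu intel: 6/158/9
   set family/model and an optional stepping; the "auto" and "none" forms
   are handled by the dedicated sub-commands above.  */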
3041
3042/* The "show record btrace cpu" command. */
3043
3044static void
3045cmd_show_record_btrace_cpu (const char *args, int from_tty)
3046{
4a4495d6
MM
3047 if (args != nullptr && *args != 0)
3048 error (_("Trailing junk: '%s'."), args);
3049
3050 switch (record_btrace_cpu_state)
3051 {
3052 case CS_AUTO:
3053 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3054 return;
3055
3056 case CS_NONE:
3057 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3058 return;
3059
3060 case CS_CPU:
3061 switch (record_btrace_cpu.vendor)
3062 {
3063 case CV_INTEL:
3064 if (record_btrace_cpu.stepping == 0)
3065 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3066 record_btrace_cpu.family,
3067 record_btrace_cpu.model);
3068 else
3069 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3070 record_btrace_cpu.family,
3071 record_btrace_cpu.model,
3072 record_btrace_cpu.stepping);
3073 return;
3074 }
3075 }
3076
3077 error (_("Internal error: bad cpu state."));
3078}
3079
3080/* The "set record btrace bts" command. */
d33501a5
MM
3081
3082static void
981a3fb3 3083cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3084{
3085 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3086 "by an appropriate subcommand.\n"));
d33501a5
MM
3087 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3088 all_commands, gdb_stdout);
3089}
3090
3091/* The "show record btrace bts" command. */
3092
3093static void
981a3fb3 3094cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3095{
3096 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3097}
3098
b20a6524
MM
3099/* The "set record btrace pt" command. */
3100
3101static void
981a3fb3 3102cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3103{
3104 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3105 "by an appropriate subcommand.\n"));
3106 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3107 all_commands, gdb_stdout);
3108}
3109
3110/* The "show record btrace pt" command. */
3111
3112static void
981a3fb3 3113cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3114{
3115 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3116}
3117
3118/* The "record bts buffer-size" show value function. */
3119
3120static void
3121show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3122 struct cmd_list_element *c,
3123 const char *value)
3124{
3125 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3126 value);
3127}
3128
3129/* The "record pt buffer-size" show value function. */
3130
3131static void
3132show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3133 struct cmd_list_element *c,
3134 const char *value)
3135{
3136 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3137 value);
3138}
3139
afedecd3
MM
3140/* Initialize btrace commands. */
3141
3142void
3143_initialize_record_btrace (void)
3144{
f4abbc16
MM
3145 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3146 _("Start branch trace recording."), &record_btrace_cmdlist,
3147 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3148 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3149
f4abbc16
MM
3150 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3151 _("\
3152Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3153The processor stores a from/to record for each branch into a cyclic buffer.\n\
3154This format may not be available on all processors."),
3155 &record_btrace_cmdlist);
3156 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3157
b20a6524
MM
3158 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3159 _("\
bc504a31 3160Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3161This format may not be available on all processors."),
3162 &record_btrace_cmdlist);
3163 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3164
67b5c0c1
MM
3165 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3166 _("Set record options"), &set_record_btrace_cmdlist,
3167 "set record btrace ", 0, &set_record_cmdlist);
3168
3169 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3170 _("Show record options"), &show_record_btrace_cmdlist,
3171 "show record btrace ", 0, &show_record_cmdlist);
3172
3173 add_setshow_enum_cmd ("replay-memory-access", no_class,
3174 replay_memory_access_types, &replay_memory_access, _("\
3175Set what memory accesses are allowed during replay."), _("\
3176Show what memory accesses are allowed during replay."),
3177 _("Default is READ-ONLY.\n\n\
3178The btrace record target does not trace data.\n\
3179The memory therefore corresponds to the live target and not \
3180to the current replay position.\n\n\
3181When READ-ONLY, allow accesses to read-only memory during replay.\n\
3182When READ-WRITE, allow accesses to read-only and read-write memory during \
3183replay."),
3184 NULL, cmd_show_replay_memory_access,
3185 &set_record_btrace_cmdlist,
3186 &show_record_btrace_cmdlist);
3187
4a4495d6
MM
3188 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3189 _("\
3190Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3191The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3192For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3193When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3194The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3195When GDB does not support that cpu, this option can be used to enable\n\
3196workarounds for a similar cpu that GDB supports.\n\n\
3197When set to \"none\", errata workarounds are disabled."),
3198 &set_record_btrace_cpu_cmdlist,
3199 _("set record btrace cpu "), 1,
3200 &set_record_btrace_cmdlist);
3201
3202 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3203Automatically determine the cpu to be used for trace decode."),
3204 &set_record_btrace_cpu_cmdlist);
3205
3206 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3207Do not enable errata workarounds for trace decode."),
3208 &set_record_btrace_cpu_cmdlist);
3209
3210 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3211Show the cpu to be used for trace decode."),
3212 &show_record_btrace_cmdlist);
3213
d33501a5
MM
3214 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3215 _("Set record btrace bts options"),
3216 &set_record_btrace_bts_cmdlist,
3217 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3218
3219 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3220 _("Show record btrace bts options"),
3221 &show_record_btrace_bts_cmdlist,
3222 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3223
3224 add_setshow_uinteger_cmd ("buffer-size", no_class,
3225 &record_btrace_conf.bts.size,
3226 _("Set the record/replay bts buffer size."),
3227 _("Show the record/replay bts buffer size."), _("\
3228When starting recording, request a trace buffer of this size. \
3229The actual buffer size may differ from the requested size. \
3230Use \"info record\" to see the actual buffer size.\n\n\
3231Bigger buffers allow longer recording but also take more time to process \
3232the recorded execution trace.\n\n\
b20a6524
MM
3233The trace buffer size may not be changed while recording."), NULL,
3234 show_record_bts_buffer_size_value,
d33501a5
MM
3235 &set_record_btrace_bts_cmdlist,
3236 &show_record_btrace_bts_cmdlist);
3237
b20a6524
MM
3238 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3239 _("Set record btrace pt options"),
3240 &set_record_btrace_pt_cmdlist,
3241 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3242
3243 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3244 _("Show record btrace pt options"),
3245 &show_record_btrace_pt_cmdlist,
3246 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3247
3248 add_setshow_uinteger_cmd ("buffer-size", no_class,
3249 &record_btrace_conf.pt.size,
3250 _("Set the record/replay pt buffer size."),
3251 _("Show the record/replay pt buffer size."), _("\
3252Bigger buffers allow longer recording but also take more time to process \
3253the recorded execution.\n\
3254The actual buffer size may differ from the requested size. Use \"info record\" \
3255to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3256 &set_record_btrace_pt_cmdlist,
3257 &show_record_btrace_pt_cmdlist);
3258
d9f719f1 3259 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3260
3261 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3262 xcalloc, xfree);
d33501a5
MM
3263
3264 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3265 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3266}