]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
Avoid memcpys in regcache read_part/write_part for full registers.
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
e2882c85 3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3 43
d9f719f1
PA
44static const target_info record_btrace_target_info = {
45 "record-btrace",
46 N_("Branch tracing target"),
47 N_("Collect control-flow trace and provide the execution history.")
48};
49
afedecd3 50/* The target_ops of record-btrace. */
f6ac5f3d
PA
51
52class record_btrace_target final : public target_ops
53{
54public:
55 record_btrace_target ()
56 { to_stratum = record_stratum; }
57
d9f719f1
PA
58 const target_info &info () const override
59 { return record_btrace_target_info; }
f6ac5f3d 60
f6ac5f3d
PA
61 void close () override;
62 void async (int) override;
63
64 void detach (inferior *inf, int from_tty) override
65 { record_detach (this, inf, from_tty); }
66
67 void disconnect (const char *, int) override;
68
69 void mourn_inferior () override
70 { record_mourn_inferior (this); }
71
72 void kill () override
73 { record_kill (this); }
74
75 enum record_method record_method (ptid_t ptid) override;
76
77 void stop_recording () override;
78 void info_record () override;
79
80 void insn_history (int size, gdb_disassembly_flags flags) override;
81 void insn_history_from (ULONGEST from, int size,
82 gdb_disassembly_flags flags) override;
83 void insn_history_range (ULONGEST begin, ULONGEST end,
84 gdb_disassembly_flags flags) override;
85 void call_history (int size, record_print_flags flags) override;
86 void call_history_from (ULONGEST begin, int size, record_print_flags flags)
87 override;
88 void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
89 override;
90
57810aa7
PA
91 bool record_is_replaying (ptid_t ptid) override;
92 bool record_will_replay (ptid_t ptid, int dir) override;
f6ac5f3d
PA
93 void record_stop_replaying () override;
94
95 enum target_xfer_status xfer_partial (enum target_object object,
96 const char *annex,
97 gdb_byte *readbuf,
98 const gdb_byte *writebuf,
99 ULONGEST offset, ULONGEST len,
100 ULONGEST *xfered_len) override;
101
102 int insert_breakpoint (struct gdbarch *,
103 struct bp_target_info *) override;
104 int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
105 enum remove_bp_reason) override;
106
107 void fetch_registers (struct regcache *, int) override;
108
109 void store_registers (struct regcache *, int) override;
110 void prepare_to_store (struct regcache *) override;
111
112 const struct frame_unwind *get_unwinder () override;
113
114 const struct frame_unwind *get_tailcall_unwinder () override;
115
116 void commit_resume () override;
117 void resume (ptid_t, int, enum gdb_signal) override;
118 ptid_t wait (ptid_t, struct target_waitstatus *, int) override;
119
120 void stop (ptid_t) override;
121 void update_thread_list () override;
57810aa7 122 bool thread_alive (ptid_t ptid) override;
f6ac5f3d
PA
123 void goto_record_begin () override;
124 void goto_record_end () override;
125 void goto_record (ULONGEST insn) override;
126
57810aa7 127 bool can_execute_reverse () override;
f6ac5f3d 128
57810aa7
PA
129 bool stopped_by_sw_breakpoint () override;
130 bool supports_stopped_by_sw_breakpoint () override;
f6ac5f3d 131
57810aa7
PA
132 bool stopped_by_hw_breakpoint () override;
133 bool supports_stopped_by_hw_breakpoint () override;
f6ac5f3d
PA
134
135 enum exec_direction_kind execution_direction () override;
136 void prepare_to_generate_core () override;
137 void done_generating_core () override;
138};
139
140static record_btrace_target record_btrace_ops;
141
142/* Initialize the record-btrace target ops. */
afedecd3 143
76727919
TT
144/* Token associated with a new-thread observer enabling branch tracing
145 for the new thread. */
146static const gdb::observers::token record_btrace_thread_observer_token;
afedecd3 147
67b5c0c1
MM
148/* Memory access types used in set/show record btrace replay-memory-access. */
149static const char replay_memory_access_read_only[] = "read-only";
150static const char replay_memory_access_read_write[] = "read-write";
151static const char *const replay_memory_access_types[] =
152{
153 replay_memory_access_read_only,
154 replay_memory_access_read_write,
155 NULL
156};
157
158/* The currently allowed replay memory access type. */
159static const char *replay_memory_access = replay_memory_access_read_only;
160
4a4495d6
MM
161/* The cpu state kinds. */
162enum record_btrace_cpu_state_kind
163{
164 CS_AUTO,
165 CS_NONE,
166 CS_CPU
167};
168
169/* The current cpu state. */
170static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;
171
172/* The current cpu for trace decode. */
173static struct btrace_cpu record_btrace_cpu;
174
67b5c0c1
MM
175/* Command lists for "set/show record btrace". */
176static struct cmd_list_element *set_record_btrace_cmdlist;
177static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 178
70ad5bff
MM
179/* The execution direction of the last resume we got. See record-full.c. */
180static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
181
182/* The async event handler for reverse/replay execution. */
183static struct async_event_handler *record_btrace_async_inferior_event_handler;
184
aef92902
MM
185/* A flag indicating that we are currently generating a core file. */
186static int record_btrace_generating_corefile;
187
f4abbc16
MM
188/* The current branch trace configuration. */
189static struct btrace_config record_btrace_conf;
190
191/* Command list for "record btrace". */
192static struct cmd_list_element *record_btrace_cmdlist;
193
d33501a5
MM
194/* Command lists for "set/show record btrace bts". */
195static struct cmd_list_element *set_record_btrace_bts_cmdlist;
196static struct cmd_list_element *show_record_btrace_bts_cmdlist;
197
b20a6524
MM
198/* Command lists for "set/show record btrace pt". */
199static struct cmd_list_element *set_record_btrace_pt_cmdlist;
200static struct cmd_list_element *show_record_btrace_pt_cmdlist;
201
4a4495d6
MM
202/* Command list for "set record btrace cpu". */
203static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
204
afedecd3
MM
205/* Print a record-btrace debug message. Use do ... while (0) to avoid
206 ambiguities when used in if statements. */
207
208#define DEBUG(msg, args...) \
209 do \
210 { \
211 if (record_debug != 0) \
212 fprintf_unfiltered (gdb_stdlog, \
213 "[record-btrace] " msg "\n", ##args); \
214 } \
215 while (0)
216
217
4a4495d6
MM
218/* Return the cpu configured by the user. Returns NULL if the cpu was
219 configured as auto. */
220const struct btrace_cpu *
221record_btrace_get_cpu (void)
222{
223 switch (record_btrace_cpu_state)
224 {
225 case CS_AUTO:
226 return nullptr;
227
228 case CS_NONE:
229 record_btrace_cpu.vendor = CV_UNKNOWN;
230 /* Fall through. */
231 case CS_CPU:
232 return &record_btrace_cpu;
233 }
234
235 error (_("Internal error: bad record btrace cpu state."));
236}
237
afedecd3 238/* Update the branch trace for the current thread and return a pointer to its
066ce621 239 thread_info.
afedecd3
MM
240
241 Throws an error if there is no thread or no trace. This function never
242 returns NULL. */
243
066ce621
MM
244static struct thread_info *
245require_btrace_thread (void)
afedecd3
MM
246{
247 struct thread_info *tp;
afedecd3
MM
248
249 DEBUG ("require");
250
251 tp = find_thread_ptid (inferior_ptid);
252 if (tp == NULL)
253 error (_("No thread."));
254
cd4007e4
MM
255 validate_registers_access ();
256
4a4495d6 257 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 258
6e07b1d2 259 if (btrace_is_empty (tp))
afedecd3
MM
260 error (_("No trace."));
261
066ce621
MM
262 return tp;
263}
264
265/* Update the branch trace for the current thread and return a pointer to its
266 branch trace information struct.
267
268 Throws an error if there is no thread or no trace. This function never
269 returns NULL. */
270
271static struct btrace_thread_info *
272require_btrace (void)
273{
274 struct thread_info *tp;
275
276 tp = require_btrace_thread ();
277
278 return &tp->btrace;
afedecd3
MM
279}
280
281/* Enable branch tracing for one thread. Warn on errors. */
282
283static void
284record_btrace_enable_warn (struct thread_info *tp)
285{
492d29ea
PA
286 TRY
287 {
288 btrace_enable (tp, &record_btrace_conf);
289 }
290 CATCH (error, RETURN_MASK_ERROR)
291 {
292 warning ("%s", error.message);
293 }
294 END_CATCH
afedecd3
MM
295}
296
afedecd3
MM
297/* Enable automatic tracing of new threads. */
298
299static void
300record_btrace_auto_enable (void)
301{
302 DEBUG ("attach thread observer");
303
76727919
TT
304 gdb::observers::new_thread.attach (record_btrace_enable_warn,
305 record_btrace_thread_observer_token);
afedecd3
MM
306}
307
308/* Disable automatic tracing of new threads. */
309
310static void
311record_btrace_auto_disable (void)
312{
afedecd3
MM
313 DEBUG ("detach thread observer");
314
76727919 315 gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
afedecd3
MM
316}
317
70ad5bff
MM
318/* The record-btrace async event handler function. */
319
320static void
321record_btrace_handle_async_inferior_event (gdb_client_data data)
322{
323 inferior_event_handler (INF_REG_EVENT, NULL);
324}
325
c0272db5
TW
326/* See record-btrace.h. */
327
328void
329record_btrace_push_target (void)
330{
331 const char *format;
332
333 record_btrace_auto_enable ();
334
335 push_target (&record_btrace_ops);
336
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
339 NULL);
340 record_btrace_generating_corefile = 0;
341
342 format = btrace_format_short_string (record_btrace_conf.format);
76727919 343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
344}
345
228f1508
SM
346/* Disable btrace on a set of threads on scope exit. */
347
348struct scoped_btrace_disable
349{
350 scoped_btrace_disable () = default;
351
352 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
353
354 ~scoped_btrace_disable ()
355 {
356 for (thread_info *tp : m_threads)
357 btrace_disable (tp);
358 }
359
360 void add_thread (thread_info *thread)
361 {
362 m_threads.push_front (thread);
363 }
364
365 void discard ()
366 {
367 m_threads.clear ();
368 }
369
370private:
371 std::forward_list<thread_info *> m_threads;
372};
373
d9f719f1 374/* Open target record-btrace. */
afedecd3 375
d9f719f1
PA
376static void
377record_btrace_target_open (const char *args, int from_tty)
afedecd3 378{
228f1508
SM
379 /* If we fail to enable btrace for one thread, disable it for the threads for
380 which it was successfully enabled. */
381 scoped_btrace_disable btrace_disable;
afedecd3
MM
382 struct thread_info *tp;
383
384 DEBUG ("open");
385
8213266a 386 record_preopen ();
afedecd3
MM
387
388 if (!target_has_execution)
389 error (_("The program is not being run."));
390
034f788c 391 ALL_NON_EXITED_THREADS (tp)
5d5658a1 392 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 393 {
f4abbc16 394 btrace_enable (tp, &record_btrace_conf);
afedecd3 395
228f1508 396 btrace_disable.add_thread (tp);
afedecd3
MM
397 }
398
c0272db5 399 record_btrace_push_target ();
afedecd3 400
228f1508 401 btrace_disable.discard ();
afedecd3
MM
402}
403
f6ac5f3d 404/* The stop_recording method of target record-btrace. */
afedecd3 405
f6ac5f3d
PA
406void
407record_btrace_target::stop_recording ()
afedecd3
MM
408{
409 struct thread_info *tp;
410
411 DEBUG ("stop recording");
412
413 record_btrace_auto_disable ();
414
034f788c 415 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
416 if (tp->btrace.target != NULL)
417 btrace_disable (tp);
418}
419
f6ac5f3d 420/* The disconnect method of target record-btrace. */
c0272db5 421
f6ac5f3d
PA
422void
423record_btrace_target::disconnect (const char *args,
424 int from_tty)
c0272db5 425{
b6a8c27b 426 struct target_ops *beneath = this->beneath ();
c0272db5
TW
427
428 /* Do not stop recording, just clean up GDB side. */
f6ac5f3d 429 unpush_target (this);
c0272db5
TW
430
431 /* Forward disconnect. */
f6ac5f3d 432 beneath->disconnect (args, from_tty);
c0272db5
TW
433}
434
f6ac5f3d 435/* The close method of target record-btrace. */
afedecd3 436
f6ac5f3d
PA
437void
438record_btrace_target::close ()
afedecd3 439{
568e808b
MM
440 struct thread_info *tp;
441
70ad5bff
MM
442 if (record_btrace_async_inferior_event_handler != NULL)
443 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
444
99c819ee
MM
445 /* Make sure automatic recording gets disabled even if we did not stop
446 recording before closing the record-btrace target. */
447 record_btrace_auto_disable ();
448
568e808b
MM
449 /* We should have already stopped recording.
450 Tear down btrace in case we have not. */
034f788c 451 ALL_NON_EXITED_THREADS (tp)
568e808b 452 btrace_teardown (tp);
afedecd3
MM
453}
454
f6ac5f3d 455/* The async method of target record-btrace. */
b7d2e916 456
f6ac5f3d
PA
457void
458record_btrace_target::async (int enable)
b7d2e916 459{
6a3753b3 460 if (enable)
b7d2e916
PA
461 mark_async_event_handler (record_btrace_async_inferior_event_handler);
462 else
463 clear_async_event_handler (record_btrace_async_inferior_event_handler);
464
b6a8c27b 465 this->beneath ()->async (enable);
b7d2e916
PA
466}
467
/* Scale *SIZE down to the largest unit (GB, MB, kB) that divides it evenly
   and return the matching suffix.  If no unit divides it, *SIZE is left
   unchanged and the empty string is returned.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  const unsigned int value = *size;

  if ((value & ((1u << 30) - 1)) == 0)
    {
      *size = value >> 30;
      return "GB";
    }

  if ((value & ((1u << 20) - 1)) == 0)
    {
      *size = value >> 20;
      return "MB";
    }

  if ((value & ((1u << 10) - 1)) == 0)
    {
      *size = value >> 10;
      return "kB";
    }

  return "";
}
495
496/* Print a BTS configuration. */
497
498static void
499record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
500{
501 const char *suffix;
502 unsigned int size;
503
504 size = conf->size;
505 if (size > 0)
506 {
507 suffix = record_btrace_adjust_size (&size);
508 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
509 }
510}
511
bc504a31 512/* Print an Intel Processor Trace configuration. */
b20a6524
MM
513
514static void
515record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
516{
517 const char *suffix;
518 unsigned int size;
519
520 size = conf->size;
521 if (size > 0)
522 {
523 suffix = record_btrace_adjust_size (&size);
524 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
525 }
526}
527
d33501a5
MM
528/* Print a branch tracing configuration. */
529
530static void
531record_btrace_print_conf (const struct btrace_config *conf)
532{
533 printf_unfiltered (_("Recording format: %s.\n"),
534 btrace_format_string (conf->format));
535
536 switch (conf->format)
537 {
538 case BTRACE_FORMAT_NONE:
539 return;
540
541 case BTRACE_FORMAT_BTS:
542 record_btrace_print_bts_conf (&conf->bts);
543 return;
b20a6524
MM
544
545 case BTRACE_FORMAT_PT:
546 record_btrace_print_pt_conf (&conf->pt);
547 return;
d33501a5
MM
548 }
549
550 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
551}
552
f6ac5f3d 553/* The info_record method of target record-btrace. */
afedecd3 554
f6ac5f3d
PA
555void
556record_btrace_target::info_record ()
afedecd3
MM
557{
558 struct btrace_thread_info *btinfo;
f4abbc16 559 const struct btrace_config *conf;
afedecd3 560 struct thread_info *tp;
31fd9caa 561 unsigned int insns, calls, gaps;
afedecd3
MM
562
563 DEBUG ("info");
564
565 tp = find_thread_ptid (inferior_ptid);
566 if (tp == NULL)
567 error (_("No thread."));
568
cd4007e4
MM
569 validate_registers_access ();
570
f4abbc16
MM
571 btinfo = &tp->btrace;
572
f6ac5f3d 573 conf = ::btrace_conf (btinfo);
f4abbc16 574 if (conf != NULL)
d33501a5 575 record_btrace_print_conf (conf);
f4abbc16 576
4a4495d6 577 btrace_fetch (tp, record_btrace_get_cpu ());
afedecd3 578
23a7fe75
MM
579 insns = 0;
580 calls = 0;
31fd9caa 581 gaps = 0;
23a7fe75 582
6e07b1d2 583 if (!btrace_is_empty (tp))
23a7fe75
MM
584 {
585 struct btrace_call_iterator call;
586 struct btrace_insn_iterator insn;
587
588 btrace_call_end (&call, btinfo);
589 btrace_call_prev (&call, 1);
5de9129b 590 calls = btrace_call_number (&call);
23a7fe75
MM
591
592 btrace_insn_end (&insn, btinfo);
5de9129b 593 insns = btrace_insn_number (&insn);
31fd9caa 594
69090cee
TW
595 /* If the last instruction is not a gap, it is the current instruction
596 that is not actually part of the record. */
597 if (btrace_insn_get (&insn) != NULL)
598 insns -= 1;
31fd9caa
MM
599
600 gaps = btinfo->ngaps;
23a7fe75 601 }
afedecd3 602
31fd9caa 603 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
604 "for thread %s (%s).\n"), insns, calls, gaps,
605 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
606
607 if (btrace_is_replaying (tp))
608 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
609 btrace_insn_number (btinfo->replay));
afedecd3
MM
610}
611
31fd9caa
MM
612/* Print a decode error. */
613
614static void
615btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
616 enum btrace_format format)
617{
508352a9 618 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 619
112e8700 620 uiout->text (_("["));
508352a9
TW
621 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
622 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 623 {
112e8700
SM
624 uiout->text (_("decode error ("));
625 uiout->field_int ("errcode", errcode);
626 uiout->text (_("): "));
31fd9caa 627 }
112e8700
SM
628 uiout->text (errstr);
629 uiout->text (_("]\n"));
31fd9caa
MM
630}
631
afedecd3
MM
632/* Print an unsigned int. */
633
634static void
635ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
636{
112e8700 637 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
638}
639
/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
653
654/* Construct a line range. */
655
656static struct btrace_line_range
657btrace_mk_line_range (struct symtab *symtab, int begin, int end)
658{
659 struct btrace_line_range range;
660
661 range.symtab = symtab;
662 range.begin = begin;
663 range.end = end;
664
665 return range;
666}
667
668/* Add a line to a line range. */
669
670static struct btrace_line_range
671btrace_line_range_add (struct btrace_line_range range, int line)
672{
673 if (range.end <= range.begin)
674 {
675 /* This is the first entry. */
676 range.begin = line;
677 range.end = line + 1;
678 }
679 else if (line < range.begin)
680 range.begin = line;
681 else if (range.end < line)
682 range.end = line;
683
684 return range;
685}
686
687/* Return non-zero if RANGE is empty, zero otherwise. */
688
689static int
690btrace_line_range_is_empty (struct btrace_line_range range)
691{
692 return range.end <= range.begin;
693}
694
695/* Return non-zero if LHS contains RHS, zero otherwise. */
696
697static int
698btrace_line_range_contains_range (struct btrace_line_range lhs,
699 struct btrace_line_range rhs)
700{
701 return ((lhs.symtab == rhs.symtab)
702 && (lhs.begin <= rhs.begin)
703 && (rhs.end <= lhs.end));
704}
705
706/* Find the line range associated with PC. */
707
708static struct btrace_line_range
709btrace_find_line_range (CORE_ADDR pc)
710{
711 struct btrace_line_range range;
712 struct linetable_entry *lines;
713 struct linetable *ltable;
714 struct symtab *symtab;
715 int nlines, i;
716
717 symtab = find_pc_line_symtab (pc);
718 if (symtab == NULL)
719 return btrace_mk_line_range (NULL, 0, 0);
720
721 ltable = SYMTAB_LINETABLE (symtab);
722 if (ltable == NULL)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 nlines = ltable->nitems;
726 lines = ltable->item;
727 if (nlines <= 0)
728 return btrace_mk_line_range (symtab, 0, 0);
729
730 range = btrace_mk_line_range (symtab, 0, 0);
731 for (i = 0; i < nlines - 1; i++)
732 {
733 if ((lines[i].pc == pc) && (lines[i].line != 0))
734 range = btrace_line_range_add (range, lines[i].line);
735 }
736
737 return range;
738}
739
740/* Print source lines in LINES to UIOUT.
741
742 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
743 instructions corresponding to that source line. When printing a new source
744 line, we do the cleanups for the open chain and open a new cleanup chain for
745 the new source line. If the source line range in LINES is not empty, this
746 function will leave the cleanup chain for the last printed source line open
747 so instructions can be added to it. */
748
749static void
750btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
751 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
752 gdb::optional<ui_out_emit_list> *asm_list,
753 gdb_disassembly_flags flags)
f94cc897 754{
8d297bbf 755 print_source_lines_flags psl_flags;
f94cc897 756
f94cc897
MM
757 if (flags & DISASSEMBLY_FILENAME)
758 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
759
7ea78b59 760 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 761 {
7ea78b59 762 asm_list->reset ();
f94cc897 763
7ea78b59 764 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
765
766 print_source_lines (lines.symtab, line, line + 1, psl_flags);
767
7ea78b59 768 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
769 }
770}
771
afedecd3
MM
772/* Disassemble a section of the recorded instruction trace. */
773
774static void
23a7fe75 775btrace_insn_history (struct ui_out *uiout,
31fd9caa 776 const struct btrace_thread_info *btinfo,
23a7fe75 777 const struct btrace_insn_iterator *begin,
9a24775b
PA
778 const struct btrace_insn_iterator *end,
779 gdb_disassembly_flags flags)
afedecd3 780{
9a24775b
PA
781 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
782 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 783
f94cc897
MM
784 flags |= DISASSEMBLY_SPECULATIVE;
785
7ea78b59
SM
786 struct gdbarch *gdbarch = target_gdbarch ();
787 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 788
7ea78b59 789 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 790
7ea78b59
SM
791 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
792 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 793
8b172ce7
PA
794 gdb_pretty_print_disassembler disasm (gdbarch);
795
7ea78b59
SM
796 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
797 btrace_insn_next (&it, 1))
afedecd3 798 {
23a7fe75
MM
799 const struct btrace_insn *insn;
800
801 insn = btrace_insn_get (&it);
802
31fd9caa
MM
803 /* A NULL instruction indicates a gap in the trace. */
804 if (insn == NULL)
805 {
806 const struct btrace_config *conf;
807
808 conf = btrace_conf (btinfo);
afedecd3 809
31fd9caa
MM
810 /* We have trace so we must have a configuration. */
811 gdb_assert (conf != NULL);
812
69090cee
TW
813 uiout->field_fmt ("insn-number", "%u",
814 btrace_insn_number (&it));
815 uiout->text ("\t");
816
817 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
818 conf->format);
819 }
820 else
821 {
f94cc897 822 struct disasm_insn dinsn;
da8c46d2 823
f94cc897 824 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 825 {
f94cc897
MM
826 struct btrace_line_range lines;
827
828 lines = btrace_find_line_range (insn->pc);
829 if (!btrace_line_range_is_empty (lines)
830 && !btrace_line_range_contains_range (last_lines, lines))
831 {
7ea78b59
SM
832 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
833 flags);
f94cc897
MM
834 last_lines = lines;
835 }
7ea78b59 836 else if (!src_and_asm_tuple.has_value ())
f94cc897 837 {
7ea78b59
SM
838 gdb_assert (!asm_list.has_value ());
839
840 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
841
f94cc897 842 /* No source information. */
7ea78b59 843 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
844 }
845
7ea78b59
SM
846 gdb_assert (src_and_asm_tuple.has_value ());
847 gdb_assert (asm_list.has_value ());
da8c46d2 848 }
da8c46d2 849
f94cc897
MM
850 memset (&dinsn, 0, sizeof (dinsn));
851 dinsn.number = btrace_insn_number (&it);
852 dinsn.addr = insn->pc;
31fd9caa 853
da8c46d2 854 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 855 dinsn.is_speculative = 1;
da8c46d2 856
8b172ce7 857 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 858 }
afedecd3
MM
859 }
860}
861
f6ac5f3d 862/* The insn_history method of target record-btrace. */
afedecd3 863
f6ac5f3d
PA
864void
865record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
866{
867 struct btrace_thread_info *btinfo;
23a7fe75
MM
868 struct btrace_insn_history *history;
869 struct btrace_insn_iterator begin, end;
afedecd3 870 struct ui_out *uiout;
23a7fe75 871 unsigned int context, covered;
afedecd3
MM
872
873 uiout = current_uiout;
2e783024 874 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 875 context = abs (size);
afedecd3
MM
876 if (context == 0)
877 error (_("Bad record instruction-history-size."));
878
23a7fe75
MM
879 btinfo = require_btrace ();
880 history = btinfo->insn_history;
881 if (history == NULL)
afedecd3 882 {
07bbe694 883 struct btrace_insn_iterator *replay;
afedecd3 884
9a24775b 885 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 886
07bbe694
MM
887 /* If we're replaying, we start at the replay position. Otherwise, we
888 start at the tail of the trace. */
889 replay = btinfo->replay;
890 if (replay != NULL)
891 begin = *replay;
892 else
893 btrace_insn_end (&begin, btinfo);
894
895 /* We start from here and expand in the requested direction. Then we
896 expand in the other direction, as well, to fill up any remaining
897 context. */
898 end = begin;
899 if (size < 0)
900 {
901 /* We want the current position covered, as well. */
902 covered = btrace_insn_next (&end, 1);
903 covered += btrace_insn_prev (&begin, context - covered);
904 covered += btrace_insn_next (&end, context - covered);
905 }
906 else
907 {
908 covered = btrace_insn_next (&end, context);
909 covered += btrace_insn_prev (&begin, context - covered);
910 }
afedecd3
MM
911 }
912 else
913 {
23a7fe75
MM
914 begin = history->begin;
915 end = history->end;
afedecd3 916
9a24775b 917 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 918 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 919
23a7fe75
MM
920 if (size < 0)
921 {
922 end = begin;
923 covered = btrace_insn_prev (&begin, context);
924 }
925 else
926 {
927 begin = end;
928 covered = btrace_insn_next (&end, context);
929 }
afedecd3
MM
930 }
931
23a7fe75 932 if (covered > 0)
31fd9caa 933 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
934 else
935 {
936 if (size < 0)
937 printf_unfiltered (_("At the start of the branch trace record.\n"));
938 else
939 printf_unfiltered (_("At the end of the branch trace record.\n"));
940 }
afedecd3 941
23a7fe75 942 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
943}
944
f6ac5f3d 945/* The insn_history_range method of target record-btrace. */
afedecd3 946
f6ac5f3d
PA
947void
948record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
949 gdb_disassembly_flags flags)
afedecd3
MM
950{
951 struct btrace_thread_info *btinfo;
23a7fe75 952 struct btrace_insn_iterator begin, end;
afedecd3 953 struct ui_out *uiout;
23a7fe75
MM
954 unsigned int low, high;
955 int found;
afedecd3
MM
956
957 uiout = current_uiout;
2e783024 958 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
959 low = from;
960 high = to;
afedecd3 961
9a24775b 962 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
963
964 /* Check for wrap-arounds. */
23a7fe75 965 if (low != from || high != to)
afedecd3
MM
966 error (_("Bad range."));
967
0688d04e 968 if (high < low)
afedecd3
MM
969 error (_("Bad range."));
970
23a7fe75 971 btinfo = require_btrace ();
afedecd3 972
23a7fe75
MM
973 found = btrace_find_insn_by_number (&begin, btinfo, low);
974 if (found == 0)
975 error (_("Range out of bounds."));
afedecd3 976
23a7fe75
MM
977 found = btrace_find_insn_by_number (&end, btinfo, high);
978 if (found == 0)
0688d04e
MM
979 {
980 /* Silently truncate the range. */
981 btrace_insn_end (&end, btinfo);
982 }
983 else
984 {
985 /* We want both begin and end to be inclusive. */
986 btrace_insn_next (&end, 1);
987 }
afedecd3 988
31fd9caa 989 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 990 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
991}
992
f6ac5f3d 993/* The insn_history_from method of target record-btrace. */
afedecd3 994
f6ac5f3d
PA
995void
996record_btrace_target::insn_history_from (ULONGEST from, int size,
997 gdb_disassembly_flags flags)
afedecd3
MM
998{
999 ULONGEST begin, end, context;
1000
1001 context = abs (size);
0688d04e
MM
1002 if (context == 0)
1003 error (_("Bad record instruction-history-size."));
afedecd3
MM
1004
1005 if (size < 0)
1006 {
1007 end = from;
1008
1009 if (from < context)
1010 begin = 0;
1011 else
0688d04e 1012 begin = from - context + 1;
afedecd3
MM
1013 }
1014 else
1015 {
1016 begin = from;
0688d04e 1017 end = from + context - 1;
afedecd3
MM
1018
1019 /* Check for wrap-around. */
1020 if (end < begin)
1021 end = ULONGEST_MAX;
1022 }
1023
f6ac5f3d 1024 insn_history_range (begin, end, flags);
afedecd3
MM
1025}
1026
1027/* Print the instruction number range for a function call history line. */
1028
1029static void
23a7fe75
MM
1030btrace_call_history_insn_range (struct ui_out *uiout,
1031 const struct btrace_function *bfun)
afedecd3 1032{
7acbe133
MM
1033 unsigned int begin, end, size;
1034
0860c437 1035 size = bfun->insn.size ();
7acbe133 1036 gdb_assert (size > 0);
afedecd3 1037
23a7fe75 1038 begin = bfun->insn_offset;
7acbe133 1039 end = begin + size - 1;
afedecd3 1040
23a7fe75 1041 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 1042 uiout->text (",");
23a7fe75 1043 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
1044}
1045
ce0dfbea
MM
1046/* Compute the lowest and highest source line for the instructions in BFUN
1047 and return them in PBEGIN and PEND.
1048 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1049 result from inlining or macro expansion. */
1050
1051static void
1052btrace_compute_src_line_range (const struct btrace_function *bfun,
1053 int *pbegin, int *pend)
1054{
ce0dfbea
MM
1055 struct symtab *symtab;
1056 struct symbol *sym;
ce0dfbea
MM
1057 int begin, end;
1058
1059 begin = INT_MAX;
1060 end = INT_MIN;
1061
1062 sym = bfun->sym;
1063 if (sym == NULL)
1064 goto out;
1065
1066 symtab = symbol_symtab (sym);
1067
0860c437 1068 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1069 {
1070 struct symtab_and_line sal;
1071
0860c437 1072 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1073 if (sal.symtab != symtab || sal.line == 0)
1074 continue;
1075
325fac50
PA
1076 begin = std::min (begin, sal.line);
1077 end = std::max (end, sal.line);
ce0dfbea
MM
1078 }
1079
1080 out:
1081 *pbegin = begin;
1082 *pend = end;
1083}
1084
afedecd3
MM
1085/* Print the source line information for a function call history line. */
1086
1087static void
23a7fe75
MM
1088btrace_call_history_src_line (struct ui_out *uiout,
1089 const struct btrace_function *bfun)
afedecd3
MM
1090{
1091 struct symbol *sym;
23a7fe75 1092 int begin, end;
afedecd3
MM
1093
1094 sym = bfun->sym;
1095 if (sym == NULL)
1096 return;
1097
112e8700 1098 uiout->field_string ("file",
08be3fe3 1099 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 1100
ce0dfbea 1101 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1102 if (end < begin)
afedecd3
MM
1103 return;
1104
112e8700
SM
1105 uiout->text (":");
1106 uiout->field_int ("min line", begin);
afedecd3 1107
23a7fe75 1108 if (end == begin)
afedecd3
MM
1109 return;
1110
112e8700
SM
1111 uiout->text (",");
1112 uiout->field_int ("max line", end);
afedecd3
MM
1113}
1114
0b722aec
MM
1115/* Get the name of a branch trace function. */
1116
1117static const char *
1118btrace_get_bfun_name (const struct btrace_function *bfun)
1119{
1120 struct minimal_symbol *msym;
1121 struct symbol *sym;
1122
1123 if (bfun == NULL)
1124 return "??";
1125
1126 msym = bfun->msym;
1127 sym = bfun->sym;
1128
1129 if (sym != NULL)
1130 return SYMBOL_PRINT_NAME (sym);
1131 else if (msym != NULL)
efd66ac6 1132 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1133 else
1134 return "??";
1135}
1136
afedecd3
MM
1137/* Disassemble a section of the recorded function trace. */
1138
1139static void
23a7fe75 1140btrace_call_history (struct ui_out *uiout,
8710b709 1141 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1142 const struct btrace_call_iterator *begin,
1143 const struct btrace_call_iterator *end,
8d297bbf 1144 int int_flags)
afedecd3 1145{
23a7fe75 1146 struct btrace_call_iterator it;
8d297bbf 1147 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1148
8d297bbf 1149 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1150 btrace_call_number (end));
afedecd3 1151
23a7fe75 1152 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1153 {
23a7fe75
MM
1154 const struct btrace_function *bfun;
1155 struct minimal_symbol *msym;
1156 struct symbol *sym;
1157
1158 bfun = btrace_call_get (&it);
23a7fe75 1159 sym = bfun->sym;
0b722aec 1160 msym = bfun->msym;
23a7fe75 1161
afedecd3 1162 /* Print the function index. */
23a7fe75 1163 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1164 uiout->text ("\t");
afedecd3 1165
31fd9caa
MM
1166 /* Indicate gaps in the trace. */
1167 if (bfun->errcode != 0)
1168 {
1169 const struct btrace_config *conf;
1170
1171 conf = btrace_conf (btinfo);
1172
1173 /* We have trace so we must have a configuration. */
1174 gdb_assert (conf != NULL);
1175
1176 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1177
1178 continue;
1179 }
1180
8710b709
MM
1181 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1182 {
1183 int level = bfun->level + btinfo->level, i;
1184
1185 for (i = 0; i < level; ++i)
112e8700 1186 uiout->text (" ");
8710b709
MM
1187 }
1188
1189 if (sym != NULL)
112e8700 1190 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1191 else if (msym != NULL)
112e8700
SM
1192 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1193 else if (!uiout->is_mi_like_p ())
1194 uiout->field_string ("function", "??");
8710b709 1195
1e038f67 1196 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1197 {
112e8700 1198 uiout->text (_("\tinst "));
23a7fe75 1199 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1200 }
1201
1e038f67 1202 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1203 {
112e8700 1204 uiout->text (_("\tat "));
23a7fe75 1205 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1206 }
1207
112e8700 1208 uiout->text ("\n");
afedecd3
MM
1209 }
1210}
1211
f6ac5f3d 1212/* The call_history method of target record-btrace. */
afedecd3 1213
f6ac5f3d
PA
1214void
1215record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1216{
1217 struct btrace_thread_info *btinfo;
23a7fe75
MM
1218 struct btrace_call_history *history;
1219 struct btrace_call_iterator begin, end;
afedecd3 1220 struct ui_out *uiout;
23a7fe75 1221 unsigned int context, covered;
afedecd3
MM
1222
1223 uiout = current_uiout;
2e783024 1224 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1225 context = abs (size);
afedecd3
MM
1226 if (context == 0)
1227 error (_("Bad record function-call-history-size."));
1228
23a7fe75
MM
1229 btinfo = require_btrace ();
1230 history = btinfo->call_history;
1231 if (history == NULL)
afedecd3 1232 {
07bbe694 1233 struct btrace_insn_iterator *replay;
afedecd3 1234
0cb7c7b0 1235 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1236
07bbe694
MM
1237 /* If we're replaying, we start at the replay position. Otherwise, we
1238 start at the tail of the trace. */
1239 replay = btinfo->replay;
1240 if (replay != NULL)
1241 {
07bbe694 1242 begin.btinfo = btinfo;
a0f1b963 1243 begin.index = replay->call_index;
07bbe694
MM
1244 }
1245 else
1246 btrace_call_end (&begin, btinfo);
1247
1248 /* We start from here and expand in the requested direction. Then we
1249 expand in the other direction, as well, to fill up any remaining
1250 context. */
1251 end = begin;
1252 if (size < 0)
1253 {
1254 /* We want the current position covered, as well. */
1255 covered = btrace_call_next (&end, 1);
1256 covered += btrace_call_prev (&begin, context - covered);
1257 covered += btrace_call_next (&end, context - covered);
1258 }
1259 else
1260 {
1261 covered = btrace_call_next (&end, context);
1262 covered += btrace_call_prev (&begin, context- covered);
1263 }
afedecd3
MM
1264 }
1265 else
1266 {
23a7fe75
MM
1267 begin = history->begin;
1268 end = history->end;
afedecd3 1269
0cb7c7b0 1270 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1271 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1272
23a7fe75
MM
1273 if (size < 0)
1274 {
1275 end = begin;
1276 covered = btrace_call_prev (&begin, context);
1277 }
1278 else
1279 {
1280 begin = end;
1281 covered = btrace_call_next (&end, context);
1282 }
afedecd3
MM
1283 }
1284
23a7fe75 1285 if (covered > 0)
8710b709 1286 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1287 else
1288 {
1289 if (size < 0)
1290 printf_unfiltered (_("At the start of the branch trace record.\n"));
1291 else
1292 printf_unfiltered (_("At the end of the branch trace record.\n"));
1293 }
afedecd3 1294
23a7fe75 1295 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1296}
1297
f6ac5f3d 1298/* The call_history_range method of target record-btrace. */
afedecd3 1299
f6ac5f3d
PA
1300void
1301record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1302 record_print_flags flags)
afedecd3
MM
1303{
1304 struct btrace_thread_info *btinfo;
23a7fe75 1305 struct btrace_call_iterator begin, end;
afedecd3 1306 struct ui_out *uiout;
23a7fe75
MM
1307 unsigned int low, high;
1308 int found;
afedecd3
MM
1309
1310 uiout = current_uiout;
2e783024 1311 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1312 low = from;
1313 high = to;
afedecd3 1314
0cb7c7b0 1315 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1316
1317 /* Check for wrap-arounds. */
23a7fe75 1318 if (low != from || high != to)
afedecd3
MM
1319 error (_("Bad range."));
1320
0688d04e 1321 if (high < low)
afedecd3
MM
1322 error (_("Bad range."));
1323
23a7fe75 1324 btinfo = require_btrace ();
afedecd3 1325
23a7fe75
MM
1326 found = btrace_find_call_by_number (&begin, btinfo, low);
1327 if (found == 0)
1328 error (_("Range out of bounds."));
afedecd3 1329
23a7fe75
MM
1330 found = btrace_find_call_by_number (&end, btinfo, high);
1331 if (found == 0)
0688d04e
MM
1332 {
1333 /* Silently truncate the range. */
1334 btrace_call_end (&end, btinfo);
1335 }
1336 else
1337 {
1338 /* We want both begin and end to be inclusive. */
1339 btrace_call_next (&end, 1);
1340 }
afedecd3 1341
8710b709 1342 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1343 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1344}
1345
f6ac5f3d 1346/* The call_history_from method of target record-btrace. */
afedecd3 1347
f6ac5f3d
PA
1348void
1349record_btrace_target::call_history_from (ULONGEST from, int size,
1350 record_print_flags flags)
afedecd3
MM
1351{
1352 ULONGEST begin, end, context;
1353
1354 context = abs (size);
0688d04e
MM
1355 if (context == 0)
1356 error (_("Bad record function-call-history-size."));
afedecd3
MM
1357
1358 if (size < 0)
1359 {
1360 end = from;
1361
1362 if (from < context)
1363 begin = 0;
1364 else
0688d04e 1365 begin = from - context + 1;
afedecd3
MM
1366 }
1367 else
1368 {
1369 begin = from;
0688d04e 1370 end = from + context - 1;
afedecd3
MM
1371
1372 /* Check for wrap-around. */
1373 if (end < begin)
1374 end = ULONGEST_MAX;
1375 }
1376
f6ac5f3d 1377 call_history_range ( begin, end, flags);
afedecd3
MM
1378}
1379
f6ac5f3d 1380/* The record_method method of target record-btrace. */
b158a20f 1381
f6ac5f3d
PA
1382enum record_method
1383record_btrace_target::record_method (ptid_t ptid)
b158a20f 1384{
b158a20f
TW
1385 struct thread_info * const tp = find_thread_ptid (ptid);
1386
1387 if (tp == NULL)
1388 error (_("No thread."));
1389
1390 if (tp->btrace.target == NULL)
1391 return RECORD_METHOD_NONE;
1392
1393 return RECORD_METHOD_BTRACE;
1394}
1395
f6ac5f3d 1396/* The record_is_replaying method of target record-btrace. */
07bbe694 1397
57810aa7 1398bool
f6ac5f3d 1399record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694
MM
1400{
1401 struct thread_info *tp;
1402
034f788c 1403 ALL_NON_EXITED_THREADS (tp)
a52eab48 1404 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
57810aa7 1405 return true;
07bbe694 1406
57810aa7 1407 return false;
07bbe694
MM
1408}
1409
f6ac5f3d 1410/* The record_will_replay method of target record-btrace. */
7ff27e9b 1411
57810aa7 1412bool
f6ac5f3d 1413record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1414{
f6ac5f3d 1415 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1416}
1417
f6ac5f3d 1418/* The xfer_partial method of target record-btrace. */
633785ff 1419
f6ac5f3d
PA
1420enum target_xfer_status
1421record_btrace_target::xfer_partial (enum target_object object,
1422 const char *annex, gdb_byte *readbuf,
1423 const gdb_byte *writebuf, ULONGEST offset,
1424 ULONGEST len, ULONGEST *xfered_len)
633785ff 1425{
633785ff 1426 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1427 if (replay_memory_access == replay_memory_access_read_only
aef92902 1428 && !record_btrace_generating_corefile
f6ac5f3d 1429 && record_is_replaying (inferior_ptid))
633785ff
MM
1430 {
1431 switch (object)
1432 {
1433 case TARGET_OBJECT_MEMORY:
1434 {
1435 struct target_section *section;
1436
1437 /* We do not allow writing memory in general. */
1438 if (writebuf != NULL)
9b409511
YQ
1439 {
1440 *xfered_len = len;
bc113b4e 1441 return TARGET_XFER_UNAVAILABLE;
9b409511 1442 }
633785ff
MM
1443
1444 /* We allow reading readonly memory. */
f6ac5f3d 1445 section = target_section_by_addr (this, offset);
633785ff
MM
1446 if (section != NULL)
1447 {
1448 /* Check if the section we found is readonly. */
1449 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1450 section->the_bfd_section)
1451 & SEC_READONLY) != 0)
1452 {
1453 /* Truncate the request to fit into this section. */
325fac50 1454 len = std::min (len, section->endaddr - offset);
633785ff
MM
1455 break;
1456 }
1457 }
1458
9b409511 1459 *xfered_len = len;
bc113b4e 1460 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1461 }
1462 }
1463 }
1464
1465 /* Forward the request. */
b6a8c27b
PA
1466 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1467 offset, len, xfered_len);
633785ff
MM
1468}
1469
f6ac5f3d 1470/* The insert_breakpoint method of target record-btrace. */
633785ff 1471
f6ac5f3d
PA
1472int
1473record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1474 struct bp_target_info *bp_tgt)
633785ff 1475{
67b5c0c1
MM
1476 const char *old;
1477 int ret;
633785ff
MM
1478
1479 /* Inserting breakpoints requires accessing memory. Allow it for the
1480 duration of this function. */
67b5c0c1
MM
1481 old = replay_memory_access;
1482 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1483
1484 ret = 0;
492d29ea
PA
1485 TRY
1486 {
b6a8c27b 1487 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1488 }
492d29ea
PA
1489 CATCH (except, RETURN_MASK_ALL)
1490 {
6c63c96a 1491 replay_memory_access = old;
492d29ea
PA
1492 throw_exception (except);
1493 }
1494 END_CATCH
6c63c96a 1495 replay_memory_access = old;
633785ff
MM
1496
1497 return ret;
1498}
1499
f6ac5f3d 1500/* The remove_breakpoint method of target record-btrace. */
633785ff 1501
f6ac5f3d
PA
1502int
1503record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1504 struct bp_target_info *bp_tgt,
1505 enum remove_bp_reason reason)
633785ff 1506{
67b5c0c1
MM
1507 const char *old;
1508 int ret;
633785ff
MM
1509
1510 /* Removing breakpoints requires accessing memory. Allow it for the
1511 duration of this function. */
67b5c0c1
MM
1512 old = replay_memory_access;
1513 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1514
1515 ret = 0;
492d29ea
PA
1516 TRY
1517 {
b6a8c27b 1518 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1519 }
492d29ea
PA
1520 CATCH (except, RETURN_MASK_ALL)
1521 {
6c63c96a 1522 replay_memory_access = old;
492d29ea
PA
1523 throw_exception (except);
1524 }
1525 END_CATCH
6c63c96a 1526 replay_memory_access = old;
633785ff
MM
1527
1528 return ret;
1529}
1530
f6ac5f3d 1531/* The fetch_registers method of target record-btrace. */
1f3ef581 1532
f6ac5f3d
PA
1533void
1534record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1535{
1536 struct btrace_insn_iterator *replay;
1537 struct thread_info *tp;
1538
222312d3 1539 tp = find_thread_ptid (regcache->ptid ());
1f3ef581
MM
1540 gdb_assert (tp != NULL);
1541
1542 replay = tp->btrace.replay;
aef92902 1543 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1544 {
1545 const struct btrace_insn *insn;
1546 struct gdbarch *gdbarch;
1547 int pcreg;
1548
ac7936df 1549 gdbarch = regcache->arch ();
1f3ef581
MM
1550 pcreg = gdbarch_pc_regnum (gdbarch);
1551 if (pcreg < 0)
1552 return;
1553
1554 /* We can only provide the PC register. */
1555 if (regno >= 0 && regno != pcreg)
1556 return;
1557
1558 insn = btrace_insn_get (replay);
1559 gdb_assert (insn != NULL);
1560
73e1c03f 1561 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1562 }
1563 else
b6a8c27b 1564 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1565}
1566
f6ac5f3d 1567/* The store_registers method of target record-btrace. */
1f3ef581 1568
f6ac5f3d
PA
1569void
1570record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581
MM
1571{
1572 struct target_ops *t;
1573
a52eab48 1574 if (!record_btrace_generating_corefile
222312d3 1575 && record_is_replaying (regcache->ptid ()))
4d10e986 1576 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1577
1578 gdb_assert (may_write_registers != 0);
1579
b6a8c27b 1580 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1581}
1582
f6ac5f3d 1583/* The prepare_to_store method of target record-btrace. */
1f3ef581 1584
f6ac5f3d
PA
1585void
1586record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1587{
a52eab48 1588 if (!record_btrace_generating_corefile
222312d3 1589 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1590 return;
1591
b6a8c27b 1592 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1593}
1594
0b722aec
MM
1595/* The branch trace frame cache. */
1596
1597struct btrace_frame_cache
1598{
1599 /* The thread. */
1600 struct thread_info *tp;
1601
1602 /* The frame info. */
1603 struct frame_info *frame;
1604
1605 /* The branch trace function segment. */
1606 const struct btrace_function *bfun;
1607};
1608
1609/* A struct btrace_frame_cache hash table indexed by NEXT. */
1610
1611static htab_t bfcache;
1612
1613/* hash_f for htab_create_alloc of bfcache. */
1614
1615static hashval_t
1616bfcache_hash (const void *arg)
1617{
19ba03f4
SM
1618 const struct btrace_frame_cache *cache
1619 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1620
1621 return htab_hash_pointer (cache->frame);
1622}
1623
1624/* eq_f for htab_create_alloc of bfcache. */
1625
1626static int
1627bfcache_eq (const void *arg1, const void *arg2)
1628{
19ba03f4
SM
1629 const struct btrace_frame_cache *cache1
1630 = (const struct btrace_frame_cache *) arg1;
1631 const struct btrace_frame_cache *cache2
1632 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1633
1634 return cache1->frame == cache2->frame;
1635}
1636
1637/* Create a new btrace frame cache. */
1638
1639static struct btrace_frame_cache *
1640bfcache_new (struct frame_info *frame)
1641{
1642 struct btrace_frame_cache *cache;
1643 void **slot;
1644
1645 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1646 cache->frame = frame;
1647
1648 slot = htab_find_slot (bfcache, cache, INSERT);
1649 gdb_assert (*slot == NULL);
1650 *slot = cache;
1651
1652 return cache;
1653}
1654
1655/* Extract the branch trace function from a branch trace frame. */
1656
1657static const struct btrace_function *
1658btrace_get_frame_function (struct frame_info *frame)
1659{
1660 const struct btrace_frame_cache *cache;
0b722aec
MM
1661 struct btrace_frame_cache pattern;
1662 void **slot;
1663
1664 pattern.frame = frame;
1665
1666 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1667 if (slot == NULL)
1668 return NULL;
1669
19ba03f4 1670 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1671 return cache->bfun;
1672}
1673
cecac1ab
MM
1674/* Implement stop_reason method for record_btrace_frame_unwind. */
1675
1676static enum unwind_stop_reason
1677record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1678 void **this_cache)
1679{
0b722aec
MM
1680 const struct btrace_frame_cache *cache;
1681 const struct btrace_function *bfun;
1682
19ba03f4 1683 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1684 bfun = cache->bfun;
1685 gdb_assert (bfun != NULL);
1686
42bfe59e 1687 if (bfun->up == 0)
0b722aec
MM
1688 return UNWIND_UNAVAILABLE;
1689
1690 return UNWIND_NO_REASON;
cecac1ab
MM
1691}
1692
1693/* Implement this_id method for record_btrace_frame_unwind. */
1694
1695static void
1696record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1697 struct frame_id *this_id)
1698{
0b722aec
MM
1699 const struct btrace_frame_cache *cache;
1700 const struct btrace_function *bfun;
4aeb0dfc 1701 struct btrace_call_iterator it;
0b722aec
MM
1702 CORE_ADDR code, special;
1703
19ba03f4 1704 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1705
1706 bfun = cache->bfun;
1707 gdb_assert (bfun != NULL);
1708
4aeb0dfc
TW
1709 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1710 bfun = btrace_call_get (&it);
0b722aec
MM
1711
1712 code = get_frame_func (this_frame);
1713 special = bfun->number;
1714
1715 *this_id = frame_id_build_unavailable_stack_special (code, special);
1716
1717 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1718 btrace_get_bfun_name (cache->bfun),
1719 core_addr_to_string_nz (this_id->code_addr),
1720 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1721}
1722
1723/* Implement prev_register method for record_btrace_frame_unwind. */
1724
1725static struct value *
1726record_btrace_frame_prev_register (struct frame_info *this_frame,
1727 void **this_cache,
1728 int regnum)
1729{
0b722aec
MM
1730 const struct btrace_frame_cache *cache;
1731 const struct btrace_function *bfun, *caller;
42bfe59e 1732 struct btrace_call_iterator it;
0b722aec
MM
1733 struct gdbarch *gdbarch;
1734 CORE_ADDR pc;
1735 int pcreg;
1736
1737 gdbarch = get_frame_arch (this_frame);
1738 pcreg = gdbarch_pc_regnum (gdbarch);
1739 if (pcreg < 0 || regnum != pcreg)
1740 throw_error (NOT_AVAILABLE_ERROR,
1741 _("Registers are not available in btrace record history"));
1742
19ba03f4 1743 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1744 bfun = cache->bfun;
1745 gdb_assert (bfun != NULL);
1746
42bfe59e 1747 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1748 throw_error (NOT_AVAILABLE_ERROR,
1749 _("No caller in btrace record history"));
1750
42bfe59e
TW
1751 caller = btrace_call_get (&it);
1752
0b722aec 1753 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1754 pc = caller->insn.front ().pc;
0b722aec
MM
1755 else
1756 {
0860c437 1757 pc = caller->insn.back ().pc;
0b722aec
MM
1758 pc += gdb_insn_length (gdbarch, pc);
1759 }
1760
1761 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1762 btrace_get_bfun_name (bfun), bfun->level,
1763 core_addr_to_string_nz (pc));
1764
1765 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1766}
1767
1768/* Implement sniffer method for record_btrace_frame_unwind. */
1769
1770static int
1771record_btrace_frame_sniffer (const struct frame_unwind *self,
1772 struct frame_info *this_frame,
1773 void **this_cache)
1774{
0b722aec
MM
1775 const struct btrace_function *bfun;
1776 struct btrace_frame_cache *cache;
cecac1ab 1777 struct thread_info *tp;
0b722aec 1778 struct frame_info *next;
cecac1ab
MM
1779
1780 /* THIS_FRAME does not contain a reference to its thread. */
1781 tp = find_thread_ptid (inferior_ptid);
1782 gdb_assert (tp != NULL);
1783
0b722aec
MM
1784 bfun = NULL;
1785 next = get_next_frame (this_frame);
1786 if (next == NULL)
1787 {
1788 const struct btrace_insn_iterator *replay;
1789
1790 replay = tp->btrace.replay;
1791 if (replay != NULL)
08c3f6d2 1792 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1793 }
1794 else
1795 {
1796 const struct btrace_function *callee;
42bfe59e 1797 struct btrace_call_iterator it;
0b722aec
MM
1798
1799 callee = btrace_get_frame_function (next);
42bfe59e
TW
1800 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1801 return 0;
1802
1803 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1804 return 0;
1805
1806 bfun = btrace_call_get (&it);
0b722aec
MM
1807 }
1808
1809 if (bfun == NULL)
1810 return 0;
1811
1812 DEBUG ("[frame] sniffed frame for %s on level %d",
1813 btrace_get_bfun_name (bfun), bfun->level);
1814
1815 /* This is our frame. Initialize the frame cache. */
1816 cache = bfcache_new (this_frame);
1817 cache->tp = tp;
1818 cache->bfun = bfun;
1819
1820 *this_cache = cache;
1821 return 1;
1822}
1823
1824/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1825
1826static int
1827record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1828 struct frame_info *this_frame,
1829 void **this_cache)
1830{
1831 const struct btrace_function *bfun, *callee;
1832 struct btrace_frame_cache *cache;
42bfe59e 1833 struct btrace_call_iterator it;
0b722aec 1834 struct frame_info *next;
42bfe59e 1835 struct thread_info *tinfo;
0b722aec
MM
1836
1837 next = get_next_frame (this_frame);
1838 if (next == NULL)
1839 return 0;
1840
1841 callee = btrace_get_frame_function (next);
1842 if (callee == NULL)
1843 return 0;
1844
1845 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1846 return 0;
1847
42bfe59e
TW
1848 tinfo = find_thread_ptid (inferior_ptid);
1849 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1850 return 0;
1851
42bfe59e
TW
1852 bfun = btrace_call_get (&it);
1853
0b722aec
MM
1854 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1855 btrace_get_bfun_name (bfun), bfun->level);
1856
1857 /* This is our frame. Initialize the frame cache. */
1858 cache = bfcache_new (this_frame);
42bfe59e 1859 cache->tp = tinfo;
0b722aec
MM
1860 cache->bfun = bfun;
1861
1862 *this_cache = cache;
1863 return 1;
1864}
1865
1866static void
1867record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1868{
1869 struct btrace_frame_cache *cache;
1870 void **slot;
1871
19ba03f4 1872 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1873
1874 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1875 gdb_assert (slot != NULL);
1876
1877 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1878}
1879
1880/* btrace recording does not store previous memory content, neither the stack
1881 frames content. Any unwinding would return errorneous results as the stack
1882 contents no longer matches the changed PC value restored from history.
1883 Therefore this unwinder reports any possibly unwound registers as
1884 <unavailable>. */
1885
0b722aec 1886const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1887{
1888 NORMAL_FRAME,
1889 record_btrace_frame_unwind_stop_reason,
1890 record_btrace_frame_this_id,
1891 record_btrace_frame_prev_register,
1892 NULL,
0b722aec
MM
1893 record_btrace_frame_sniffer,
1894 record_btrace_frame_dealloc_cache
1895};
1896
1897const struct frame_unwind record_btrace_tailcall_frame_unwind =
1898{
1899 TAILCALL_FRAME,
1900 record_btrace_frame_unwind_stop_reason,
1901 record_btrace_frame_this_id,
1902 record_btrace_frame_prev_register,
1903 NULL,
1904 record_btrace_tailcall_frame_sniffer,
1905 record_btrace_frame_dealloc_cache
cecac1ab 1906};
b2f4cfde 1907
f6ac5f3d 1908/* Implement the get_unwinder method. */
ac01945b 1909
f6ac5f3d
PA
1910const struct frame_unwind *
1911record_btrace_target::get_unwinder ()
ac01945b
TT
1912{
1913 return &record_btrace_frame_unwind;
1914}
1915
f6ac5f3d 1916/* Implement the get_tailcall_unwinder method. */
ac01945b 1917
f6ac5f3d
PA
1918const struct frame_unwind *
1919record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1920{
1921 return &record_btrace_tailcall_frame_unwind;
1922}
1923
987e68b1
MM
1924/* Return a human-readable string for FLAG. */
1925
1926static const char *
1927btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1928{
1929 switch (flag)
1930 {
1931 case BTHR_STEP:
1932 return "step";
1933
1934 case BTHR_RSTEP:
1935 return "reverse-step";
1936
1937 case BTHR_CONT:
1938 return "cont";
1939
1940 case BTHR_RCONT:
1941 return "reverse-cont";
1942
1943 case BTHR_STOP:
1944 return "stop";
1945 }
1946
1947 return "<invalid>";
1948}
1949
52834460
MM
1950/* Indicate that TP should be resumed according to FLAG. */
1951
1952static void
1953record_btrace_resume_thread (struct thread_info *tp,
1954 enum btrace_thread_flag flag)
1955{
1956 struct btrace_thread_info *btinfo;
1957
43792cf0 1958 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1959 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1960
1961 btinfo = &tp->btrace;
1962
52834460 1963 /* Fetch the latest branch trace. */
4a4495d6 1964 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1965
0ca912df
MM
1966 /* A resume request overwrites a preceding resume or stop request. */
1967 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1968 btinfo->flags |= flag;
1969}
1970
ec71cc2f
MM
1971/* Get the current frame for TP. */
1972
1973static struct frame_info *
1974get_thread_current_frame (struct thread_info *tp)
1975{
1976 struct frame_info *frame;
1977 ptid_t old_inferior_ptid;
1978 int executing;
1979
1980 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1981 old_inferior_ptid = inferior_ptid;
1982 inferior_ptid = tp->ptid;
1983
1984 /* Clear the executing flag to allow changes to the current frame.
1985 We are not actually running, yet. We just started a reverse execution
1986 command or a record goto command.
1987 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1988 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f
MM
1989 move the thread. Since we need to recompute the stack, we temporarily
1990 set EXECUTING to flase. */
1991 executing = is_executing (inferior_ptid);
1992 set_executing (inferior_ptid, 0);
1993
1994 frame = NULL;
1995 TRY
1996 {
1997 frame = get_current_frame ();
1998 }
1999 CATCH (except, RETURN_MASK_ALL)
2000 {
2001 /* Restore the previous execution state. */
2002 set_executing (inferior_ptid, executing);
2003
2004 /* Restore the previous inferior_ptid. */
2005 inferior_ptid = old_inferior_ptid;
2006
2007 throw_exception (except);
2008 }
2009 END_CATCH
2010
2011 /* Restore the previous execution state. */
2012 set_executing (inferior_ptid, executing);
2013
2014 /* Restore the previous inferior_ptid. */
2015 inferior_ptid = old_inferior_ptid;
2016
2017 return frame;
2018}
2019
52834460
MM
2020/* Start replaying a thread. */
2021
2022static struct btrace_insn_iterator *
2023record_btrace_start_replaying (struct thread_info *tp)
2024{
52834460
MM
2025 struct btrace_insn_iterator *replay;
2026 struct btrace_thread_info *btinfo;
52834460
MM
2027
2028 btinfo = &tp->btrace;
2029 replay = NULL;
2030
2031 /* We can't start replaying without trace. */
b54b03bd 2032 if (btinfo->functions.empty ())
52834460
MM
2033 return NULL;
2034
52834460
MM
2035 /* GDB stores the current frame_id when stepping in order to detects steps
2036 into subroutines.
2037 Since frames are computed differently when we're replaying, we need to
2038 recompute those stored frames and fix them up so we can still detect
2039 subroutines after we started replaying. */
492d29ea 2040 TRY
52834460
MM
2041 {
2042 struct frame_info *frame;
2043 struct frame_id frame_id;
2044 int upd_step_frame_id, upd_step_stack_frame_id;
2045
2046 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 2047 frame = get_thread_current_frame (tp);
52834460
MM
2048 frame_id = get_frame_id (frame);
2049
2050 /* Check if we need to update any stepping-related frame id's. */
2051 upd_step_frame_id = frame_id_eq (frame_id,
2052 tp->control.step_frame_id);
2053 upd_step_stack_frame_id = frame_id_eq (frame_id,
2054 tp->control.step_stack_frame_id);
2055
2056 /* We start replaying at the end of the branch trace. This corresponds
2057 to the current instruction. */
8d749320 2058 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2059 btrace_insn_end (replay, btinfo);
2060
31fd9caa
MM
2061 /* Skip gaps at the end of the trace. */
2062 while (btrace_insn_get (replay) == NULL)
2063 {
2064 unsigned int steps;
2065
2066 steps = btrace_insn_prev (replay, 1);
2067 if (steps == 0)
2068 error (_("No trace."));
2069 }
2070
52834460
MM
2071 /* We're not replaying, yet. */
2072 gdb_assert (btinfo->replay == NULL);
2073 btinfo->replay = replay;
2074
2075 /* Make sure we're not using any stale registers. */
2076 registers_changed_ptid (tp->ptid);
2077
2078 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 2079 frame = get_thread_current_frame (tp);
52834460
MM
2080 frame_id = get_frame_id (frame);
2081
2082 /* Replace stepping related frames where necessary. */
2083 if (upd_step_frame_id)
2084 tp->control.step_frame_id = frame_id;
2085 if (upd_step_stack_frame_id)
2086 tp->control.step_stack_frame_id = frame_id;
2087 }
492d29ea 2088 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2089 {
2090 xfree (btinfo->replay);
2091 btinfo->replay = NULL;
2092
2093 registers_changed_ptid (tp->ptid);
2094
2095 throw_exception (except);
2096 }
492d29ea 2097 END_CATCH
52834460
MM
2098
2099 return replay;
2100}
2101
2102/* Stop replaying a thread. */
2103
2104static void
2105record_btrace_stop_replaying (struct thread_info *tp)
2106{
2107 struct btrace_thread_info *btinfo;
2108
2109 btinfo = &tp->btrace;
2110
2111 xfree (btinfo->replay);
2112 btinfo->replay = NULL;
2113
2114 /* Make sure we're not leaving any stale registers. */
2115 registers_changed_ptid (tp->ptid);
2116}
2117
e3cfc1c7
MM
2118/* Stop replaying TP if it is at the end of its execution history. */
2119
2120static void
2121record_btrace_stop_replaying_at_end (struct thread_info *tp)
2122{
2123 struct btrace_insn_iterator *replay, end;
2124 struct btrace_thread_info *btinfo;
2125
2126 btinfo = &tp->btrace;
2127 replay = btinfo->replay;
2128
2129 if (replay == NULL)
2130 return;
2131
2132 btrace_insn_end (&end, btinfo);
2133
2134 if (btrace_insn_cmp (replay, &end) == 0)
2135 record_btrace_stop_replaying (tp);
2136}
2137
f6ac5f3d 2138/* The resume method of target record-btrace. */
b2f4cfde 2139
f6ac5f3d
PA
2140void
2141record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2142{
0ca912df 2143 struct thread_info *tp;
d2939ba2 2144 enum btrace_thread_flag flag, cflag;
52834460 2145
987e68b1 2146 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
f6ac5f3d 2147 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2148 step ? "step" : "cont");
52834460 2149
0ca912df
MM
2150 /* Store the execution direction of the last resume.
2151
f6ac5f3d 2152 If there is more than one resume call, we have to rely on infrun
0ca912df 2153 to not change the execution direction in-between. */
f6ac5f3d 2154 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2155
0ca912df 2156 /* As long as we're not replaying, just forward the request.
52834460 2157
0ca912df
MM
2158 For non-stop targets this means that no thread is replaying. In order to
2159 make progress, we may need to explicitly move replaying threads to the end
2160 of their execution history. */
f6ac5f3d
PA
2161 if ((::execution_direction != EXEC_REVERSE)
2162 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2163 {
b6a8c27b 2164 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2165 return;
b2f4cfde
MM
2166 }
2167
52834460 2168 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2169 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2170 {
2171 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2172 cflag = BTHR_RCONT;
2173 }
52834460 2174 else
d2939ba2
MM
2175 {
2176 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2177 cflag = BTHR_CONT;
2178 }
52834460 2179
52834460 2180 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2181 record_btrace_wait below.
2182
2183 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2184 if (!target_is_non_stop_p ())
2185 {
2186 gdb_assert (ptid_match (inferior_ptid, ptid));
2187
2188 ALL_NON_EXITED_THREADS (tp)
2189 if (ptid_match (tp->ptid, ptid))
2190 {
2191 if (ptid_match (tp->ptid, inferior_ptid))
2192 record_btrace_resume_thread (tp, flag);
2193 else
2194 record_btrace_resume_thread (tp, cflag);
2195 }
2196 }
2197 else
2198 {
2199 ALL_NON_EXITED_THREADS (tp)
2200 if (ptid_match (tp->ptid, ptid))
2201 record_btrace_resume_thread (tp, flag);
2202 }
70ad5bff
MM
2203
2204 /* Async support. */
2205 if (target_can_async_p ())
2206 {
6a3753b3 2207 target_async (1);
70ad5bff
MM
2208 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2209 }
52834460
MM
2210}
2211
f6ac5f3d 2212/* The commit_resume method of target record-btrace. */
85ad3aaf 2213
f6ac5f3d
PA
2214void
2215record_btrace_target::commit_resume ()
85ad3aaf 2216{
f6ac5f3d
PA
2217 if ((::execution_direction != EXEC_REVERSE)
2218 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2219 beneath ()->commit_resume ();
85ad3aaf
PA
2220}
2221
987e68b1
MM
2222/* Cancel resuming TP. */
2223
2224static void
2225record_btrace_cancel_resume (struct thread_info *tp)
2226{
2227 enum btrace_thread_flag flags;
2228
2229 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2230 if (flags == 0)
2231 return;
2232
43792cf0
PA
2233 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2234 print_thread_id (tp),
987e68b1
MM
2235 target_pid_to_str (tp->ptid), flags,
2236 btrace_thread_flag_to_str (flags));
2237
2238 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2239 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2240}
2241
2242/* Return a target_waitstatus indicating that we ran out of history. */
2243
2244static struct target_waitstatus
2245btrace_step_no_history (void)
2246{
2247 struct target_waitstatus status;
2248
2249 status.kind = TARGET_WAITKIND_NO_HISTORY;
2250
2251 return status;
2252}
2253
2254/* Return a target_waitstatus indicating that a step finished. */
2255
2256static struct target_waitstatus
2257btrace_step_stopped (void)
2258{
2259 struct target_waitstatus status;
2260
2261 status.kind = TARGET_WAITKIND_STOPPED;
2262 status.value.sig = GDB_SIGNAL_TRAP;
2263
2264 return status;
2265}
2266
6e4879f0
MM
2267/* Return a target_waitstatus indicating that a thread was stopped as
2268 requested. */
2269
2270static struct target_waitstatus
2271btrace_step_stopped_on_request (void)
2272{
2273 struct target_waitstatus status;
2274
2275 status.kind = TARGET_WAITKIND_STOPPED;
2276 status.value.sig = GDB_SIGNAL_0;
2277
2278 return status;
2279}
2280
d825d248
MM
2281/* Return a target_waitstatus indicating a spurious stop. */
2282
2283static struct target_waitstatus
2284btrace_step_spurious (void)
2285{
2286 struct target_waitstatus status;
2287
2288 status.kind = TARGET_WAITKIND_SPURIOUS;
2289
2290 return status;
2291}
2292
e3cfc1c7
MM
2293/* Return a target_waitstatus indicating that the thread was not resumed. */
2294
2295static struct target_waitstatus
2296btrace_step_no_resumed (void)
2297{
2298 struct target_waitstatus status;
2299
2300 status.kind = TARGET_WAITKIND_NO_RESUMED;
2301
2302 return status;
2303}
2304
2305/* Return a target_waitstatus indicating that we should wait again. */
2306
2307static struct target_waitstatus
2308btrace_step_again (void)
2309{
2310 struct target_waitstatus status;
2311
2312 status.kind = TARGET_WAITKIND_IGNORE;
2313
2314 return status;
2315}
2316
52834460
MM
2317/* Clear the record histories. */
2318
2319static void
2320record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2321{
2322 xfree (btinfo->insn_history);
2323 xfree (btinfo->call_history);
2324
2325 btinfo->insn_history = NULL;
2326 btinfo->call_history = NULL;
2327}
2328
3c615f99
MM
2329/* Check whether TP's current replay position is at a breakpoint. */
2330
2331static int
2332record_btrace_replay_at_breakpoint (struct thread_info *tp)
2333{
2334 struct btrace_insn_iterator *replay;
2335 struct btrace_thread_info *btinfo;
2336 const struct btrace_insn *insn;
2337 struct inferior *inf;
2338
2339 btinfo = &tp->btrace;
2340 replay = btinfo->replay;
2341
2342 if (replay == NULL)
2343 return 0;
2344
2345 insn = btrace_insn_get (replay);
2346 if (insn == NULL)
2347 return 0;
2348
2349 inf = find_inferior_ptid (tp->ptid);
2350 if (inf == NULL)
2351 return 0;
2352
2353 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2354 &btinfo->stop_reason);
2355}
2356
d825d248 2357/* Step one instruction in forward direction. */
52834460
MM
2358
2359static struct target_waitstatus
d825d248 2360record_btrace_single_step_forward (struct thread_info *tp)
52834460 2361{
b61ce85c 2362 struct btrace_insn_iterator *replay, end, start;
52834460 2363 struct btrace_thread_info *btinfo;
52834460 2364
d825d248
MM
2365 btinfo = &tp->btrace;
2366 replay = btinfo->replay;
2367
2368 /* We're done if we're not replaying. */
2369 if (replay == NULL)
2370 return btrace_step_no_history ();
2371
011c71b6
MM
2372 /* Check if we're stepping a breakpoint. */
2373 if (record_btrace_replay_at_breakpoint (tp))
2374 return btrace_step_stopped ();
2375
b61ce85c
MM
2376 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2377 jump back to the instruction at which we started. */
2378 start = *replay;
d825d248
MM
2379 do
2380 {
2381 unsigned int steps;
2382
e3cfc1c7
MM
2383 /* We will bail out here if we continue stepping after reaching the end
2384 of the execution history. */
d825d248
MM
2385 steps = btrace_insn_next (replay, 1);
2386 if (steps == 0)
b61ce85c
MM
2387 {
2388 *replay = start;
2389 return btrace_step_no_history ();
2390 }
d825d248
MM
2391 }
2392 while (btrace_insn_get (replay) == NULL);
2393
2394 /* Determine the end of the instruction trace. */
2395 btrace_insn_end (&end, btinfo);
2396
e3cfc1c7
MM
2397 /* The execution trace contains (and ends with) the current instruction.
2398 This instruction has not been executed, yet, so the trace really ends
2399 one instruction earlier. */
d825d248 2400 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2401 return btrace_step_no_history ();
d825d248
MM
2402
2403 return btrace_step_spurious ();
2404}
2405
2406/* Step one instruction in backward direction. */
2407
2408static struct target_waitstatus
2409record_btrace_single_step_backward (struct thread_info *tp)
2410{
b61ce85c 2411 struct btrace_insn_iterator *replay, start;
d825d248 2412 struct btrace_thread_info *btinfo;
e59fa00f 2413
52834460
MM
2414 btinfo = &tp->btrace;
2415 replay = btinfo->replay;
2416
d825d248
MM
2417 /* Start replaying if we're not already doing so. */
2418 if (replay == NULL)
2419 replay = record_btrace_start_replaying (tp);
2420
2421 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2422 Skip gaps during replay. If we end up at a gap (at the beginning of
2423 the trace), jump back to the instruction at which we started. */
2424 start = *replay;
d825d248
MM
2425 do
2426 {
2427 unsigned int steps;
2428
2429 steps = btrace_insn_prev (replay, 1);
2430 if (steps == 0)
b61ce85c
MM
2431 {
2432 *replay = start;
2433 return btrace_step_no_history ();
2434 }
d825d248
MM
2435 }
2436 while (btrace_insn_get (replay) == NULL);
2437
011c71b6
MM
2438 /* Check if we're stepping a breakpoint.
2439
2440 For reverse-stepping, this check is after the step. There is logic in
2441 infrun.c that handles reverse-stepping separately. See, for example,
2442 proceed and adjust_pc_after_break.
2443
2444 This code assumes that for reverse-stepping, PC points to the last
2445 de-executed instruction, whereas for forward-stepping PC points to the
2446 next to-be-executed instruction. */
2447 if (record_btrace_replay_at_breakpoint (tp))
2448 return btrace_step_stopped ();
2449
d825d248
MM
2450 return btrace_step_spurious ();
2451}
2452
2453/* Step a single thread. */
2454
2455static struct target_waitstatus
2456record_btrace_step_thread (struct thread_info *tp)
2457{
2458 struct btrace_thread_info *btinfo;
2459 struct target_waitstatus status;
2460 enum btrace_thread_flag flags;
2461
2462 btinfo = &tp->btrace;
2463
6e4879f0
MM
2464 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2465 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2466
43792cf0 2467 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2468 target_pid_to_str (tp->ptid), flags,
2469 btrace_thread_flag_to_str (flags));
52834460 2470
6e4879f0
MM
2471 /* We can't step without an execution history. */
2472 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2473 return btrace_step_no_history ();
2474
52834460
MM
2475 switch (flags)
2476 {
2477 default:
2478 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2479
6e4879f0
MM
2480 case BTHR_STOP:
2481 return btrace_step_stopped_on_request ();
2482
52834460 2483 case BTHR_STEP:
d825d248
MM
2484 status = record_btrace_single_step_forward (tp);
2485 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2486 break;
52834460
MM
2487
2488 return btrace_step_stopped ();
2489
2490 case BTHR_RSTEP:
d825d248
MM
2491 status = record_btrace_single_step_backward (tp);
2492 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2493 break;
52834460
MM
2494
2495 return btrace_step_stopped ();
2496
2497 case BTHR_CONT:
e3cfc1c7
MM
2498 status = record_btrace_single_step_forward (tp);
2499 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2500 break;
52834460 2501
e3cfc1c7
MM
2502 btinfo->flags |= flags;
2503 return btrace_step_again ();
52834460
MM
2504
2505 case BTHR_RCONT:
e3cfc1c7
MM
2506 status = record_btrace_single_step_backward (tp);
2507 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2508 break;
52834460 2509
e3cfc1c7
MM
2510 btinfo->flags |= flags;
2511 return btrace_step_again ();
2512 }
d825d248 2513
f6ac5f3d 2514 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2515 method will stop the thread for whom the event is reported. */
2516 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2517 btinfo->flags |= flags;
52834460 2518
e3cfc1c7 2519 return status;
b2f4cfde
MM
2520}
2521
a6b5be76
MM
2522/* Announce further events if necessary. */
2523
2524static void
53127008
SM
2525record_btrace_maybe_mark_async_event
2526 (const std::vector<thread_info *> &moving,
2527 const std::vector<thread_info *> &no_history)
a6b5be76 2528{
53127008
SM
2529 bool more_moving = !moving.empty ();
2530 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2531
2532 if (!more_moving && !more_no_history)
2533 return;
2534
2535 if (more_moving)
2536 DEBUG ("movers pending");
2537
2538 if (more_no_history)
2539 DEBUG ("no-history pending");
2540
2541 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2542}
2543
f6ac5f3d 2544/* The wait method of target record-btrace. */
b2f4cfde 2545
f6ac5f3d
PA
2546ptid_t
2547record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2548 int options)
b2f4cfde 2549{
53127008
SM
2550 std::vector<thread_info *> moving;
2551 std::vector<thread_info *> no_history;
52834460
MM
2552
2553 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2554
b2f4cfde 2555 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2556 if ((::execution_direction != EXEC_REVERSE)
2557 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2558 {
b6a8c27b 2559 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2560 }
2561
e3cfc1c7 2562 /* Keep a work list of moving threads. */
53127008
SM
2563 {
2564 thread_info *tp;
2565
2566 ALL_NON_EXITED_THREADS (tp)
2567 {
2568 if (ptid_match (tp->ptid, ptid)
2569 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2570 moving.push_back (tp);
2571 }
2572 }
e3cfc1c7 2573
53127008 2574 if (moving.empty ())
52834460 2575 {
e3cfc1c7 2576 *status = btrace_step_no_resumed ();
52834460 2577
e3cfc1c7 2578 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2579 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2580
e3cfc1c7 2581 return null_ptid;
52834460
MM
2582 }
2583
e3cfc1c7
MM
2584 /* Step moving threads one by one, one step each, until either one thread
2585 reports an event or we run out of threads to step.
2586
2587 When stepping more than one thread, chances are that some threads reach
2588 the end of their execution history earlier than others. If we reported
2589 this immediately, all-stop on top of non-stop would stop all threads and
2590 resume the same threads next time. And we would report the same thread
2591 having reached the end of its execution history again.
2592
2593 In the worst case, this would starve the other threads. But even if other
2594 threads would be allowed to make progress, this would result in far too
2595 many intermediate stops.
2596
2597 We therefore delay the reporting of "no execution history" until we have
2598 nothing else to report. By this time, all threads should have moved to
2599 either the beginning or the end of their execution history. There will
2600 be a single user-visible stop. */
53127008
SM
2601 struct thread_info *eventing = NULL;
2602 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2603 {
53127008 2604 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2605 {
53127008
SM
2606 thread_info *tp = moving[ix];
2607
e3cfc1c7
MM
2608 *status = record_btrace_step_thread (tp);
2609
2610 switch (status->kind)
2611 {
2612 case TARGET_WAITKIND_IGNORE:
2613 ix++;
2614 break;
2615
2616 case TARGET_WAITKIND_NO_HISTORY:
53127008 2617 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2618 break;
2619
2620 default:
53127008 2621 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2622 break;
2623 }
2624 }
2625 }
2626
2627 if (eventing == NULL)
2628 {
2629 /* We started with at least one moving thread. This thread must have
2630 either stopped or reached the end of its execution history.
2631
2632 In the former case, EVENTING must not be NULL.
2633 In the latter case, NO_HISTORY must not be empty. */
53127008 2634 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2635
2636 /* We kept threads moving at the end of their execution history. Stop
2637 EVENTING now that we are going to report its stop. */
53127008 2638 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2639 eventing->btrace.flags &= ~BTHR_MOVE;
2640
2641 *status = btrace_step_no_history ();
2642 }
2643
2644 gdb_assert (eventing != NULL);
2645
2646 /* We kept threads replaying at the end of their execution history. Stop
2647 replaying EVENTING now that we are going to report its stop. */
2648 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2649
2650 /* Stop all other threads. */
5953356c 2651 if (!target_is_non_stop_p ())
53127008
SM
2652 {
2653 thread_info *tp;
2654
2655 ALL_NON_EXITED_THREADS (tp)
2656 record_btrace_cancel_resume (tp);
2657 }
52834460 2658
a6b5be76
MM
2659 /* In async mode, we need to announce further events. */
2660 if (target_is_async_p ())
2661 record_btrace_maybe_mark_async_event (moving, no_history);
2662
52834460 2663 /* Start record histories anew from the current position. */
e3cfc1c7 2664 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2665
2666 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2667 registers_changed_ptid (eventing->ptid);
2668
43792cf0
PA
2669 DEBUG ("wait ended by thread %s (%s): %s",
2670 print_thread_id (eventing),
e3cfc1c7 2671 target_pid_to_str (eventing->ptid),
23fdd69e 2672 target_waitstatus_to_string (status).c_str ());
52834460 2673
e3cfc1c7 2674 return eventing->ptid;
52834460
MM
2675}
2676
f6ac5f3d 2677/* The stop method of target record-btrace. */
6e4879f0 2678
f6ac5f3d
PA
2679void
2680record_btrace_target::stop (ptid_t ptid)
6e4879f0
MM
2681{
2682 DEBUG ("stop %s", target_pid_to_str (ptid));
2683
2684 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2685 if ((::execution_direction != EXEC_REVERSE)
2686 && !record_is_replaying (minus_one_ptid))
6e4879f0 2687 {
b6a8c27b 2688 this->beneath ()->stop (ptid);
6e4879f0
MM
2689 }
2690 else
2691 {
2692 struct thread_info *tp;
2693
2694 ALL_NON_EXITED_THREADS (tp)
2695 if (ptid_match (tp->ptid, ptid))
2696 {
2697 tp->btrace.flags &= ~BTHR_MOVE;
2698 tp->btrace.flags |= BTHR_STOP;
2699 }
2700 }
2701 }
2702
f6ac5f3d 2703/* The can_execute_reverse method of target record-btrace. */
52834460 2704
57810aa7 2705bool
f6ac5f3d 2706record_btrace_target::can_execute_reverse ()
52834460 2707{
57810aa7 2708 return true;
52834460
MM
2709}
2710
f6ac5f3d 2711/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2712
57810aa7 2713bool
f6ac5f3d 2714record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2715{
f6ac5f3d 2716 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2717 {
2718 struct thread_info *tp = inferior_thread ();
2719
2720 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2721 }
2722
b6a8c27b 2723 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2724}
2725
f6ac5f3d 2726/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2727 record-btrace. */
2728
57810aa7 2729bool
f6ac5f3d 2730record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2731{
f6ac5f3d 2732 if (record_is_replaying (minus_one_ptid))
57810aa7 2733 return true;
9e8915c6 2734
b6a8c27b 2735 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2736}
2737
f6ac5f3d 2738/* The stopped_by_sw_breakpoint method of target record-btrace. */
9e8915c6 2739
57810aa7 2740bool
f6ac5f3d 2741record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2742{
f6ac5f3d 2743 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2744 {
2745 struct thread_info *tp = inferior_thread ();
2746
2747 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2748 }
2749
b6a8c27b 2750 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2751}
2752
f6ac5f3d 2753/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2754 record-btrace. */
2755
57810aa7 2756bool
f6ac5f3d 2757record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2758{
f6ac5f3d 2759 if (record_is_replaying (minus_one_ptid))
57810aa7 2760 return true;
52834460 2761
b6a8c27b 2762 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2763}
2764
f6ac5f3d 2765/* The update_thread_list method of target record-btrace. */
e2887aa3 2766
f6ac5f3d
PA
2767void
2768record_btrace_target::update_thread_list ()
e2887aa3 2769{
e8032dde 2770 /* We don't add or remove threads during replay. */
f6ac5f3d 2771 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2772 return;
2773
2774 /* Forward the request. */
b6a8c27b 2775 this->beneath ()->update_thread_list ();
e2887aa3
MM
2776}
2777
f6ac5f3d 2778/* The thread_alive method of target record-btrace. */
e2887aa3 2779
57810aa7 2780bool
f6ac5f3d 2781record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2782{
2783 /* We don't add or remove threads during replay. */
f6ac5f3d 2784 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2785 return find_thread_ptid (ptid) != NULL;
2786
2787 /* Forward the request. */
b6a8c27b 2788 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2789}
2790
066ce621
MM
2791/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2792 is stopped. */
2793
2794static void
2795record_btrace_set_replay (struct thread_info *tp,
2796 const struct btrace_insn_iterator *it)
2797{
2798 struct btrace_thread_info *btinfo;
2799
2800 btinfo = &tp->btrace;
2801
a0f1b963 2802 if (it == NULL)
52834460 2803 record_btrace_stop_replaying (tp);
066ce621
MM
2804 else
2805 {
2806 if (btinfo->replay == NULL)
52834460 2807 record_btrace_start_replaying (tp);
066ce621
MM
2808 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2809 return;
2810
2811 *btinfo->replay = *it;
52834460 2812 registers_changed_ptid (tp->ptid);
066ce621
MM
2813 }
2814
52834460
MM
2815 /* Start anew from the new replay position. */
2816 record_btrace_clear_histories (btinfo);
485668e5
MM
2817
2818 stop_pc = regcache_read_pc (get_current_regcache ());
2819 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2820}
2821
f6ac5f3d 2822/* The goto_record_begin method of target record-btrace. */
066ce621 2823
f6ac5f3d
PA
2824void
2825record_btrace_target::goto_record_begin ()
066ce621
MM
2826{
2827 struct thread_info *tp;
2828 struct btrace_insn_iterator begin;
2829
2830 tp = require_btrace_thread ();
2831
2832 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2833
2834 /* Skip gaps at the beginning of the trace. */
2835 while (btrace_insn_get (&begin) == NULL)
2836 {
2837 unsigned int steps;
2838
2839 steps = btrace_insn_next (&begin, 1);
2840 if (steps == 0)
2841 error (_("No trace."));
2842 }
2843
066ce621 2844 record_btrace_set_replay (tp, &begin);
066ce621
MM
2845}
2846
f6ac5f3d 2847/* The goto_record_end method of target record-btrace. */
066ce621 2848
f6ac5f3d
PA
2849void
2850record_btrace_target::goto_record_end ()
066ce621
MM
2851{
2852 struct thread_info *tp;
2853
2854 tp = require_btrace_thread ();
2855
2856 record_btrace_set_replay (tp, NULL);
066ce621
MM
2857}
2858
f6ac5f3d 2859/* The goto_record method of target record-btrace. */
066ce621 2860
f6ac5f3d
PA
2861void
2862record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2863{
2864 struct thread_info *tp;
2865 struct btrace_insn_iterator it;
2866 unsigned int number;
2867 int found;
2868
2869 number = insn;
2870
2871 /* Check for wrap-arounds. */
2872 if (number != insn)
2873 error (_("Instruction number out of range."));
2874
2875 tp = require_btrace_thread ();
2876
2877 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2878
2879 /* Check if the instruction could not be found or is a gap. */
2880 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2881 error (_("No such instruction."));
2882
2883 record_btrace_set_replay (tp, &it);
066ce621
MM
2884}
2885
f6ac5f3d 2886/* The record_stop_replaying method of target record-btrace. */
797094dd 2887
f6ac5f3d
PA
2888void
2889record_btrace_target::record_stop_replaying ()
797094dd
MM
2890{
2891 struct thread_info *tp;
2892
2893 ALL_NON_EXITED_THREADS (tp)
2894 record_btrace_stop_replaying (tp);
2895}
2896
f6ac5f3d 2897/* The execution_direction target method. */
70ad5bff 2898
f6ac5f3d
PA
2899enum exec_direction_kind
2900record_btrace_target::execution_direction ()
70ad5bff
MM
2901{
2902 return record_btrace_resume_exec_dir;
2903}
2904
f6ac5f3d 2905/* The prepare_to_generate_core target method. */
aef92902 2906
f6ac5f3d
PA
2907void
2908record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2909{
2910 record_btrace_generating_corefile = 1;
2911}
2912
f6ac5f3d 2913/* The done_generating_core target method. */
aef92902 2914
f6ac5f3d
PA
2915void
2916record_btrace_target::done_generating_core ()
aef92902
MM
2917{
2918 record_btrace_generating_corefile = 0;
2919}
2920
f4abbc16
MM
2921/* Start recording in BTS format. */
2922
2923static void
cdb34d4a 2924cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2925{
f4abbc16
MM
2926 if (args != NULL && *args != 0)
2927 error (_("Invalid argument."));
2928
2929 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2930
492d29ea
PA
2931 TRY
2932 {
95a6b0a1 2933 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2934 }
2935 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2936 {
2937 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2938 throw_exception (exception);
2939 }
492d29ea 2940 END_CATCH
f4abbc16
MM
2941}
2942
bc504a31 2943/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2944
2945static void
cdb34d4a 2946cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2947{
2948 if (args != NULL && *args != 0)
2949 error (_("Invalid argument."));
2950
b20a6524 2951 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2952
492d29ea
PA
2953 TRY
2954 {
95a6b0a1 2955 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2956 }
2957 CATCH (exception, RETURN_MASK_ALL)
2958 {
2959 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2960 throw_exception (exception);
2961 }
2962 END_CATCH
afedecd3
MM
2963}
2964
b20a6524
MM
2965/* Alias for "target record". */
2966
2967static void
981a3fb3 2968cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2969{
2970 if (args != NULL && *args != 0)
2971 error (_("Invalid argument."));
2972
2973 record_btrace_conf.format = BTRACE_FORMAT_PT;
2974
2975 TRY
2976 {
95a6b0a1 2977 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2978 }
2979 CATCH (exception, RETURN_MASK_ALL)
2980 {
2981 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2982
2983 TRY
2984 {
95a6b0a1 2985 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2986 }
2987 CATCH (exception, RETURN_MASK_ALL)
2988 {
2989 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2990 throw_exception (exception);
2991 }
2992 END_CATCH
2993 }
2994 END_CATCH
2995}
2996
67b5c0c1
MM
2997/* The "set record btrace" command. */
2998
2999static void
981a3fb3 3000cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1 3001{
b85310e1
MM
3002 printf_unfiltered (_("\"set record btrace\" must be followed "
3003 "by an appropriate subcommand.\n"));
3004 help_list (set_record_btrace_cmdlist, "set record btrace ",
3005 all_commands, gdb_stdout);
67b5c0c1
MM
3006}
3007
3008/* The "show record btrace" command. */
3009
3010static void
981a3fb3 3011cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
3012{
3013 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3014}
3015
3016/* The "show record btrace replay-memory-access" command. */
3017
3018static void
3019cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3020 struct cmd_list_element *c, const char *value)
3021{
3022 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3023 replay_memory_access);
3024}
3025
4a4495d6
MM
3026/* The "set record btrace cpu none" command. */
3027
3028static void
3029cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
3030{
3031 if (args != nullptr && *args != 0)
3032 error (_("Trailing junk: '%s'."), args);
3033
3034 record_btrace_cpu_state = CS_NONE;
3035}
3036
3037/* The "set record btrace cpu auto" command. */
3038
3039static void
3040cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3041{
3042 if (args != nullptr && *args != 0)
3043 error (_("Trailing junk: '%s'."), args);
3044
3045 record_btrace_cpu_state = CS_AUTO;
3046}
3047
3048/* The "set record btrace cpu" command. */
3049
3050static void
3051cmd_set_record_btrace_cpu (const char *args, int from_tty)
3052{
3053 if (args == nullptr)
3054 args = "";
3055
3056 /* We use a hard-coded vendor string for now. */
3057 unsigned int family, model, stepping;
3058 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3059 &model, &l1, &stepping, &l2);
3060 if (matches == 3)
3061 {
3062 if (strlen (args) != l2)
3063 error (_("Trailing junk: '%s'."), args + l2);
3064 }
3065 else if (matches == 2)
3066 {
3067 if (strlen (args) != l1)
3068 error (_("Trailing junk: '%s'."), args + l1);
3069
3070 stepping = 0;
3071 }
3072 else
3073 error (_("Bad format. See \"help set record btrace cpu\"."));
3074
3075 if (USHRT_MAX < family)
3076 error (_("Cpu family too big."));
3077
3078 if (UCHAR_MAX < model)
3079 error (_("Cpu model too big."));
3080
3081 if (UCHAR_MAX < stepping)
3082 error (_("Cpu stepping too big."));
3083
3084 record_btrace_cpu.vendor = CV_INTEL;
3085 record_btrace_cpu.family = family;
3086 record_btrace_cpu.model = model;
3087 record_btrace_cpu.stepping = stepping;
3088
3089 record_btrace_cpu_state = CS_CPU;
3090}
3091
3092/* The "show record btrace cpu" command. */
3093
3094static void
3095cmd_show_record_btrace_cpu (const char *args, int from_tty)
3096{
3097 const char *cpu;
3098
3099 if (args != nullptr && *args != 0)
3100 error (_("Trailing junk: '%s'."), args);
3101
3102 switch (record_btrace_cpu_state)
3103 {
3104 case CS_AUTO:
3105 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3106 return;
3107
3108 case CS_NONE:
3109 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3110 return;
3111
3112 case CS_CPU:
3113 switch (record_btrace_cpu.vendor)
3114 {
3115 case CV_INTEL:
3116 if (record_btrace_cpu.stepping == 0)
3117 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3118 record_btrace_cpu.family,
3119 record_btrace_cpu.model);
3120 else
3121 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3122 record_btrace_cpu.family,
3123 record_btrace_cpu.model,
3124 record_btrace_cpu.stepping);
3125 return;
3126 }
3127 }
3128
3129 error (_("Internal error: bad cpu state."));
3130}
3131
3132/* The "s record btrace bts" command. */
d33501a5
MM
3133
3134static void
981a3fb3 3135cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3136{
3137 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3138 "by an appropriate subcommand.\n"));
d33501a5
MM
3139 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3140 all_commands, gdb_stdout);
3141}
3142
3143/* The "show record btrace bts" command. */
3144
3145static void
981a3fb3 3146cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3147{
3148 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3149}
3150
b20a6524
MM
3151/* The "set record btrace pt" command. */
3152
3153static void
981a3fb3 3154cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3155{
3156 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3157 "by an appropriate subcommand.\n"));
3158 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3159 all_commands, gdb_stdout);
3160}
3161
3162/* The "show record btrace pt" command. */
3163
3164static void
981a3fb3 3165cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3166{
3167 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3168}
3169
/* The "record bts buffer-size" show value function.

   VALUE is the pre-formatted setting string supplied by the setshow
   machinery; print it to FILE.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file,
		    _("The record/replay bts buffer size is %s.\n"), value);
}
3180
/* The "record pt buffer-size" show value function.

   VALUE is the pre-formatted setting string supplied by the setshow
   machinery; print it to FILE.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file,
		    _("The record/replay pt buffer size is %s.\n"), value);
}
3191
afedecd3
MM
3192/* Initialize btrace commands. */
3193
3194void
3195_initialize_record_btrace (void)
3196{
f4abbc16
MM
3197 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3198 _("Start branch trace recording."), &record_btrace_cmdlist,
3199 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3200 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3201
f4abbc16
MM
3202 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3203 _("\
3204Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3205The processor stores a from/to record for each branch into a cyclic buffer.\n\
3206This format may not be available on all processors."),
3207 &record_btrace_cmdlist);
3208 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3209
b20a6524
MM
3210 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3211 _("\
bc504a31 3212Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3213This format may not be available on all processors."),
3214 &record_btrace_cmdlist);
3215 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3216
67b5c0c1
MM
3217 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3218 _("Set record options"), &set_record_btrace_cmdlist,
3219 "set record btrace ", 0, &set_record_cmdlist);
3220
3221 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3222 _("Show record options"), &show_record_btrace_cmdlist,
3223 "show record btrace ", 0, &show_record_cmdlist);
3224
3225 add_setshow_enum_cmd ("replay-memory-access", no_class,
3226 replay_memory_access_types, &replay_memory_access, _("\
3227Set what memory accesses are allowed during replay."), _("\
3228Show what memory accesses are allowed during replay."),
3229 _("Default is READ-ONLY.\n\n\
3230The btrace record target does not trace data.\n\
3231The memory therefore corresponds to the live target and not \
3232to the current replay position.\n\n\
3233When READ-ONLY, allow accesses to read-only memory during replay.\n\
3234When READ-WRITE, allow accesses to read-only and read-write memory during \
3235replay."),
3236 NULL, cmd_show_replay_memory_access,
3237 &set_record_btrace_cmdlist,
3238 &show_record_btrace_cmdlist);
3239
4a4495d6
MM
3240 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3241 _("\
3242Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3243The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3244For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3245When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3246The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3247When GDB does not support that cpu, this option can be used to enable\n\
3248workarounds for a similar cpu that GDB supports.\n\n\
3249When set to \"none\", errata workarounds are disabled."),
3250 &set_record_btrace_cpu_cmdlist,
3251 _("set record btrace cpu "), 1,
3252 &set_record_btrace_cmdlist);
3253
3254 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3255Automatically determine the cpu to be used for trace decode."),
3256 &set_record_btrace_cpu_cmdlist);
3257
3258 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3259Do not enable errata workarounds for trace decode."),
3260 &set_record_btrace_cpu_cmdlist);
3261
3262 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3263Show the cpu to be used for trace decode."),
3264 &show_record_btrace_cmdlist);
3265
d33501a5
MM
3266 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3267 _("Set record btrace bts options"),
3268 &set_record_btrace_bts_cmdlist,
3269 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3270
3271 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3272 _("Show record btrace bts options"),
3273 &show_record_btrace_bts_cmdlist,
3274 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3275
3276 add_setshow_uinteger_cmd ("buffer-size", no_class,
3277 &record_btrace_conf.bts.size,
3278 _("Set the record/replay bts buffer size."),
3279 _("Show the record/replay bts buffer size."), _("\
3280When starting recording request a trace buffer of this size. \
3281The actual buffer size may differ from the requested size. \
3282Use \"info record\" to see the actual buffer size.\n\n\
3283Bigger buffers allow longer recording but also take more time to process \
3284the recorded execution trace.\n\n\
b20a6524
MM
3285The trace buffer size may not be changed while recording."), NULL,
3286 show_record_bts_buffer_size_value,
d33501a5
MM
3287 &set_record_btrace_bts_cmdlist,
3288 &show_record_btrace_bts_cmdlist);
3289
b20a6524
MM
3290 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3291 _("Set record btrace pt options"),
3292 &set_record_btrace_pt_cmdlist,
3293 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3294
3295 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3296 _("Show record btrace pt options"),
3297 &show_record_btrace_pt_cmdlist,
3298 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3299
3300 add_setshow_uinteger_cmd ("buffer-size", no_class,
3301 &record_btrace_conf.pt.size,
3302 _("Set the record/replay pt buffer size."),
3303 _("Show the record/replay pt buffer size."), _("\
3304Bigger buffers allow longer recording but also take more time to process \
3305the recorded execution.\n\
3306The actual buffer size may differ from the requested size. Use \"info record\" \
3307to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3308 &set_record_btrace_pt_cmdlist,
3309 &show_record_btrace_pt_cmdlist);
3310
d9f719f1 3311 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3312
3313 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3314 xcalloc, xfree);
d33501a5
MM
3315
3316 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3317 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3318}