/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2020 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "gdbsupport/event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"
#include "async-event.h"
#include <forward_list>

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
                         struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
                         enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
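
/* Example: the access type above is controlled by the user-visible setting

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access

   With the default "read-only", memory writes are refused while replaying;
   see record_btrace_target::xfer_partial below.  */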

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)

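/* Example: DEBUG ("resume %s", step ? "step" : "cont") expands to an
   fprintf_unfiltered call that prints "[record-btrace] resume step" to
   gdb_stdlog, guarded by record_debug ("set debug record").  */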

/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Ignore this thread if its inferior is not recorded by us.  */
  target_ops *rec = tp->inf->target_at (record_stratum);
  if (rec != &record_btrace_ops)
    return;

  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
                                     record_btrace_thread_observer_token);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL, "record-btrace");
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};
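
/* A sketch of the intended use (see record_btrace_target_open below):

     scoped_btrace_disable btrace_disable;
     btrace_enable (tp, &record_btrace_conf);
     btrace_disable.add_thread (tp);
     ...
     btrace_disable.discard ();

   If an error is thrown before discard (), the destructor disables btrace
   again on all registered threads.  */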

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution ())
    error (_("The program is not being run."));

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
                                  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
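
/* Example: a *SIZE of 2097152 (2 MB) becomes 2 with suffix "MB"; 16384
   becomes 16 with suffix "kB"; a value that is not a multiple of 1 kB,
   such as 1000, is left unchanged and "" is returned.  */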

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp),
                     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}
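
/* Example: starting from an empty range, adding lines 10, 7 and 12 in turn
   yields BEGIN == 7 and END == 12; only the first addition sets END to
   LINE + 1, later additions merely extend the bounds.  */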
681
682/* Return non-zero if RANGE is empty, zero otherwise. */
683
684static int
685btrace_line_range_is_empty (struct btrace_line_range range)
686{
687 return range.end <= range.begin;
688}
689
690/* Return non-zero if LHS contains RHS, zero otherwise. */
691
692static int
693btrace_line_range_contains_range (struct btrace_line_range lhs,
694 struct btrace_line_range rhs)
695{
696 return ((lhs.symtab == rhs.symtab)
697 && (lhs.begin <= rhs.begin)
698 && (rhs.end <= lhs.end));
699}
700
701/* Find the line range associated with PC. */
702
703static struct btrace_line_range
704btrace_find_line_range (CORE_ADDR pc)
705{
706 struct btrace_line_range range;
707 struct linetable_entry *lines;
708 struct linetable *ltable;
709 struct symtab *symtab;
710 int nlines, i;
711
712 symtab = find_pc_line_symtab (pc);
713 if (symtab == NULL)
714 return btrace_mk_line_range (NULL, 0, 0);
715
716 ltable = SYMTAB_LINETABLE (symtab);
717 if (ltable == NULL)
718 return btrace_mk_line_range (symtab, 0, 0);
719
720 nlines = ltable->nitems;
721 lines = ltable->item;
722 if (nlines <= 0)
723 return btrace_mk_line_range (symtab, 0, 0);
724
725 range = btrace_mk_line_range (symtab, 0, 0);
726 for (i = 0; i < nlines - 1; i++)
727 {
8c95582d
AB
728 /* The test of is_stmt here was added when the is_stmt field was
729 introduced to the 'struct linetable_entry' structure. This
730 ensured that this loop maintained the same behaviour as before we
731 introduced is_stmt. That said, it might be that we would be
732 better off not checking is_stmt here, this would lead to us
733 possibly adding more line numbers to the range. At the time this
734 change was made I was unsure how to test this so chose to go with
735 maintaining the existing experience. */
736 if ((lines[i].pc == pc) && (lines[i].line != 0)
737 && (lines[i].is_stmt == 1))
f94cc897
MM
738 range = btrace_line_range_add (range, lines[i].line);
739 }
740
741 return range;
742}
743
744/* Print source lines in LINES to UIOUT.
745
746 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
747 instructions corresponding to that source line. When printing a new source
748 line, we do the cleanups for the open chain and open a new cleanup chain for
749 the new source line. If the source line range in LINES is not empty, this
750 function will leave the cleanup chain for the last printed source line open
751 so instructions can be added to it. */
752
753static void
754btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
755 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
756 gdb::optional<ui_out_emit_list> *asm_list,
757 gdb_disassembly_flags flags)
f94cc897 758{
8d297bbf 759 print_source_lines_flags psl_flags;
f94cc897 760
f94cc897
MM
761 if (flags & DISASSEMBLY_FILENAME)
762 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
763
7ea78b59 764 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 765 {
7ea78b59 766 asm_list->reset ();
f94cc897 767
7ea78b59 768 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
769
770 print_source_lines (lines.symtab, line, line + 1, psl_flags);
771
7ea78b59 772 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
773 }
774}
775
afedecd3
MM
776/* Disassemble a section of the recorded instruction trace. */
777
778static void
23a7fe75 779btrace_insn_history (struct ui_out *uiout,
31fd9caa 780 const struct btrace_thread_info *btinfo,
23a7fe75 781 const struct btrace_insn_iterator *begin,
9a24775b
PA
782 const struct btrace_insn_iterator *end,
783 gdb_disassembly_flags flags)
afedecd3 784{
9a24775b
PA
785 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
786 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 787
f94cc897
MM
788 flags |= DISASSEMBLY_SPECULATIVE;
789
7ea78b59
SM
790 struct gdbarch *gdbarch = target_gdbarch ();
791 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 792
7ea78b59 793 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 794
7ea78b59
SM
795 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
796 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 797
046bebe1 798 gdb_pretty_print_disassembler disasm (gdbarch, uiout);
8b172ce7 799
7ea78b59
SM
800 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
801 btrace_insn_next (&it, 1))
afedecd3 802 {
23a7fe75
MM
803 const struct btrace_insn *insn;
804
805 insn = btrace_insn_get (&it);
806
31fd9caa
MM
807 /* A NULL instruction indicates a gap in the trace. */
808 if (insn == NULL)
809 {
810 const struct btrace_config *conf;
811
812 conf = btrace_conf (btinfo);
afedecd3 813
31fd9caa
MM
814 /* We have trace so we must have a configuration. */
815 gdb_assert (conf != NULL);
816
69090cee
TW
817 uiout->field_fmt ("insn-number", "%u",
818 btrace_insn_number (&it));
819 uiout->text ("\t");
820
821 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
822 conf->format);
823 }
824 else
825 {
f94cc897 826 struct disasm_insn dinsn;
da8c46d2 827
f94cc897 828 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 829 {
f94cc897
MM
830 struct btrace_line_range lines;
831
832 lines = btrace_find_line_range (insn->pc);
833 if (!btrace_line_range_is_empty (lines)
834 && !btrace_line_range_contains_range (last_lines, lines))
835 {
7ea78b59
SM
836 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
837 flags);
f94cc897
MM
838 last_lines = lines;
839 }
7ea78b59 840 else if (!src_and_asm_tuple.has_value ())
f94cc897 841 {
7ea78b59
SM
842 gdb_assert (!asm_list.has_value ());
843
844 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
845
f94cc897 846 /* No source information. */
7ea78b59 847 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
848 }
849
7ea78b59
SM
850 gdb_assert (src_and_asm_tuple.has_value ());
851 gdb_assert (asm_list.has_value ());
da8c46d2 852 }
da8c46d2 853
f94cc897
MM
854 memset (&dinsn, 0, sizeof (dinsn));
855 dinsn.number = btrace_insn_number (&it);
856 dinsn.addr = insn->pc;
31fd9caa 857
da8c46d2 858 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 859 dinsn.is_speculative = 1;
da8c46d2 860
046bebe1 861 disasm.pretty_print_insn (&dinsn, flags);
31fd9caa 862 }
afedecd3
MM
863 }
864}
865
f6ac5f3d 866/* The insn_history method of target record-btrace. */
afedecd3 867
f6ac5f3d
PA
868void
869record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
870{
871 struct btrace_thread_info *btinfo;
23a7fe75
MM
872 struct btrace_insn_history *history;
873 struct btrace_insn_iterator begin, end;
afedecd3 874 struct ui_out *uiout;
23a7fe75 875 unsigned int context, covered;
afedecd3
MM
876
877 uiout = current_uiout;
2e783024 878 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 879 context = abs (size);
afedecd3
MM
880 if (context == 0)
881 error (_("Bad record instruction-history-size."));
882
23a7fe75
MM
883 btinfo = require_btrace ();
884 history = btinfo->insn_history;
885 if (history == NULL)
afedecd3 886 {
07bbe694 887 struct btrace_insn_iterator *replay;
afedecd3 888
9a24775b 889 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 890
07bbe694
MM
891 /* If we're replaying, we start at the replay position. Otherwise, we
892 start at the tail of the trace. */
893 replay = btinfo->replay;
894 if (replay != NULL)
895 begin = *replay;
896 else
897 btrace_insn_end (&begin, btinfo);
898
899 /* We start from here and expand in the requested direction. Then we
900 expand in the other direction, as well, to fill up any remaining
901 context. */
902 end = begin;
903 if (size < 0)
904 {
905 /* We want the current position covered, as well. */
906 covered = btrace_insn_next (&end, 1);
907 covered += btrace_insn_prev (&begin, context - covered);
908 covered += btrace_insn_next (&end, context - covered);
909 }
910 else
911 {
912 covered = btrace_insn_next (&end, context);
913 covered += btrace_insn_prev (&begin, context - covered);
914 }
afedecd3
MM
915 }
916 else
917 {
23a7fe75
MM
918 begin = history->begin;
919 end = history->end;
afedecd3 920
9a24775b 921 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 922 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 923
23a7fe75
MM
924 if (size < 0)
925 {
926 end = begin;
927 covered = btrace_insn_prev (&begin, context);
928 }
929 else
930 {
931 begin = end;
932 covered = btrace_insn_next (&end, context);
933 }
afedecd3
MM
934 }
935
23a7fe75 936 if (covered > 0)
31fd9caa 937 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
938 else
939 {
940 if (size < 0)
941 printf_unfiltered (_("At the start of the branch trace record.\n"));
942 else
943 printf_unfiltered (_("At the end of the branch trace record.\n"));
944 }
afedecd3 945
23a7fe75 946 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
947}
948
f6ac5f3d 949/* The insn_history_range method of target record-btrace. */
afedecd3 950
f6ac5f3d
PA
951void
952record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
953 gdb_disassembly_flags flags)
afedecd3
MM
954{
955 struct btrace_thread_info *btinfo;
23a7fe75 956 struct btrace_insn_iterator begin, end;
afedecd3 957 struct ui_out *uiout;
23a7fe75
MM
958 unsigned int low, high;
959 int found;
afedecd3
MM
960
961 uiout = current_uiout;
2e783024 962 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
963 low = from;
964 high = to;
afedecd3 965
9a24775b 966 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
967
968 /* Check for wrap-arounds. */
23a7fe75 969 if (low != from || high != to)
afedecd3
MM
970 error (_("Bad range."));
971
0688d04e 972 if (high < low)
afedecd3
MM
973 error (_("Bad range."));
974
23a7fe75 975 btinfo = require_btrace ();
afedecd3 976
23a7fe75
MM
977 found = btrace_find_insn_by_number (&begin, btinfo, low);
978 if (found == 0)
979 error (_("Range out of bounds."));
afedecd3 980
23a7fe75
MM
981 found = btrace_find_insn_by_number (&end, btinfo, high);
982 if (found == 0)
0688d04e
MM
983 {
984 /* Silently truncate the range. */
985 btrace_insn_end (&end, btinfo);
986 }
987 else
988 {
989 /* We want both begin and end to be inclusive. */
990 btrace_insn_next (&end, 1);
991 }
afedecd3 992
31fd9caa 993 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 994 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
995}
996
f6ac5f3d 997/* The insn_history_from method of target record-btrace. */
afedecd3 998
f6ac5f3d
PA
999void
1000record_btrace_target::insn_history_from (ULONGEST from, int size,
1001 gdb_disassembly_flags flags)
afedecd3
MM
1002{
1003 ULONGEST begin, end, context;
1004
1005 context = abs (size);
0688d04e
MM
1006 if (context == 0)
1007 error (_("Bad record instruction-history-size."));
afedecd3
MM
1008
1009 if (size < 0)
1010 {
1011 end = from;
1012
1013 if (from < context)
1014 begin = 0;
1015 else
0688d04e 1016 begin = from - context + 1;
afedecd3
MM
1017 }
1018 else
1019 {
1020 begin = from;
0688d04e 1021 end = from + context - 1;
afedecd3
MM
1022
1023 /* Check for wrap-around. */
1024 if (end < begin)
1025 end = ULONGEST_MAX;
1026 }
1027
f6ac5f3d 1028 insn_history_range (begin, end, flags);
afedecd3
MM
1029}
1030
1031/* Print the instruction number range for a function call history line. */
1032
1033static void
23a7fe75
MM
1034btrace_call_history_insn_range (struct ui_out *uiout,
1035 const struct btrace_function *bfun)
afedecd3 1036{
7acbe133
MM
1037 unsigned int begin, end, size;
1038
0860c437 1039 size = bfun->insn.size ();
7acbe133 1040 gdb_assert (size > 0);
afedecd3 1041
23a7fe75 1042 begin = bfun->insn_offset;
7acbe133 1043 end = begin + size - 1;
afedecd3 1044
1f77b012 1045 uiout->field_unsigned ("insn begin", begin);
112e8700 1046 uiout->text (",");
1f77b012 1047 uiout->field_unsigned ("insn end", end);
afedecd3
MM
1048}
1049
ce0dfbea
MM
1050/* Compute the lowest and highest source line for the instructions in BFUN
1051 and return them in PBEGIN and PEND.
1052 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1053 result from inlining or macro expansion. */
1054
1055static void
1056btrace_compute_src_line_range (const struct btrace_function *bfun,
1057 int *pbegin, int *pend)
1058{
ce0dfbea
MM
1059 struct symtab *symtab;
1060 struct symbol *sym;
ce0dfbea
MM
1061 int begin, end;
1062
1063 begin = INT_MAX;
1064 end = INT_MIN;
1065
1066 sym = bfun->sym;
1067 if (sym == NULL)
1068 goto out;
1069
1070 symtab = symbol_symtab (sym);
1071
0860c437 1072 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1073 {
1074 struct symtab_and_line sal;
1075
0860c437 1076 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1077 if (sal.symtab != symtab || sal.line == 0)
1078 continue;
1079
325fac50
PA
1080 begin = std::min (begin, sal.line);
1081 end = std::max (end, sal.line);
ce0dfbea
MM
1082 }
1083
1084 out:
1085 *pbegin = begin;
1086 *pend = end;
1087}
1088
afedecd3
MM
1089/* Print the source line information for a function call history line. */
1090
1091static void
23a7fe75
MM
1092btrace_call_history_src_line (struct ui_out *uiout,
1093 const struct btrace_function *bfun)
afedecd3
MM
1094{
1095 struct symbol *sym;
23a7fe75 1096 int begin, end;
afedecd3
MM
1097
1098 sym = bfun->sym;
1099 if (sym == NULL)
1100 return;
1101
112e8700 1102 uiout->field_string ("file",
cbe56571 1103 symtab_to_filename_for_display (symbol_symtab (sym)),
e43b10e1 1104 file_name_style.style ());
afedecd3 1105
ce0dfbea 1106 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1107 if (end < begin)
afedecd3
MM
1108 return;
1109
112e8700 1110 uiout->text (":");
381befee 1111 uiout->field_signed ("min line", begin);
afedecd3 1112
23a7fe75 1113 if (end == begin)
afedecd3
MM
1114 return;
1115
112e8700 1116 uiout->text (",");
381befee 1117 uiout->field_signed ("max line", end);
afedecd3
MM
1118}
1119
0b722aec
MM
1120/* Get the name of a branch trace function. */
1121
1122static const char *
1123btrace_get_bfun_name (const struct btrace_function *bfun)
1124{
1125 struct minimal_symbol *msym;
1126 struct symbol *sym;
1127
1128 if (bfun == NULL)
1129 return "??";
1130
1131 msym = bfun->msym;
1132 sym = bfun->sym;
1133
1134 if (sym != NULL)
987012b8 1135 return sym->print_name ();
0b722aec 1136 else if (msym != NULL)
c9d95fa3 1137 return msym->print_name ();
0b722aec
MM
1138 else
1139 return "??";
1140}
1141
afedecd3
MM
1142/* Disassemble a section of the recorded function trace. */
1143
1144static void
23a7fe75 1145btrace_call_history (struct ui_out *uiout,
8710b709 1146 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1147 const struct btrace_call_iterator *begin,
1148 const struct btrace_call_iterator *end,
8d297bbf 1149 int int_flags)
afedecd3 1150{
23a7fe75 1151 struct btrace_call_iterator it;
8d297bbf 1152 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1153
8d297bbf 1154 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1155 btrace_call_number (end));
afedecd3 1156
23a7fe75 1157 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1158 {
23a7fe75
MM
1159 const struct btrace_function *bfun;
1160 struct minimal_symbol *msym;
1161 struct symbol *sym;
1162
1163 bfun = btrace_call_get (&it);
23a7fe75 1164 sym = bfun->sym;
0b722aec 1165 msym = bfun->msym;
23a7fe75 1166
afedecd3 1167 /* Print the function index. */
1f77b012 1168 uiout->field_unsigned ("index", bfun->number);
112e8700 1169 uiout->text ("\t");
afedecd3 1170
31fd9caa
MM
1171 /* Indicate gaps in the trace. */
1172 if (bfun->errcode != 0)
1173 {
1174 const struct btrace_config *conf;
1175
1176 conf = btrace_conf (btinfo);
1177
1178 /* We have trace so we must have a configuration. */
1179 gdb_assert (conf != NULL);
1180
1181 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1182
1183 continue;
1184 }
1185
8710b709
MM
1186 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1187 {
1188 int level = bfun->level + btinfo->level, i;
1189
1190 for (i = 0; i < level; ++i)
112e8700 1191 uiout->text (" ");
8710b709
MM
1192 }
1193
1194 if (sym != NULL)
987012b8 1195 uiout->field_string ("function", sym->print_name (),
e43b10e1 1196 function_name_style.style ());
8710b709 1197 else if (msym != NULL)
c9d95fa3 1198 uiout->field_string ("function", msym->print_name (),
e43b10e1 1199 function_name_style.style ());
112e8700 1200 else if (!uiout->is_mi_like_p ())
cbe56571 1201 uiout->field_string ("function", "??",
e43b10e1 1202 function_name_style.style ());
8710b709 1203
1e038f67 1204 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1205 {
112e8700 1206 uiout->text (_("\tinst "));
23a7fe75 1207 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1208 }
1209
1e038f67 1210 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1211 {
112e8700 1212 uiout->text (_("\tat "));
23a7fe75 1213 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1214 }
1215
112e8700 1216 uiout->text ("\n");
afedecd3
MM
1217 }
1218}
1219
f6ac5f3d 1220/* The call_history method of target record-btrace. */
afedecd3 1221
f6ac5f3d
PA
1222void
1223record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1224{
1225 struct btrace_thread_info *btinfo;
23a7fe75
MM
1226 struct btrace_call_history *history;
1227 struct btrace_call_iterator begin, end;
afedecd3 1228 struct ui_out *uiout;
23a7fe75 1229 unsigned int context, covered;
afedecd3
MM
1230
1231 uiout = current_uiout;
2e783024 1232 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1233 context = abs (size);
afedecd3
MM
1234 if (context == 0)
1235 error (_("Bad record function-call-history-size."));
1236
23a7fe75
MM
1237 btinfo = require_btrace ();
1238 history = btinfo->call_history;
1239 if (history == NULL)
afedecd3 1240 {
07bbe694 1241 struct btrace_insn_iterator *replay;
afedecd3 1242
0cb7c7b0 1243 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1244
07bbe694
MM
1245 /* If we're replaying, we start at the replay position. Otherwise, we
1246 start at the tail of the trace. */
1247 replay = btinfo->replay;
1248 if (replay != NULL)
1249 {
07bbe694 1250 begin.btinfo = btinfo;
a0f1b963 1251 begin.index = replay->call_index;
07bbe694
MM
1252 }
1253 else
1254 btrace_call_end (&begin, btinfo);
1255
1256 /* We start from here and expand in the requested direction. Then we
1257 expand in the other direction, as well, to fill up any remaining
1258 context. */
1259 end = begin;
1260 if (size < 0)
1261 {
1262 /* We want the current position covered, as well. */
1263 covered = btrace_call_next (&end, 1);
1264 covered += btrace_call_prev (&begin, context - covered);
1265 covered += btrace_call_next (&end, context - covered);
1266 }
1267 else
1268 {
1269 covered = btrace_call_next (&end, context);
1270 covered += btrace_call_prev (&begin, context- covered);
1271 }
afedecd3
MM
1272 }
1273 else
1274 {
23a7fe75
MM
1275 begin = history->begin;
1276 end = history->end;
afedecd3 1277
0cb7c7b0 1278 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1279 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1280
23a7fe75
MM
1281 if (size < 0)
1282 {
1283 end = begin;
1284 covered = btrace_call_prev (&begin, context);
1285 }
1286 else
1287 {
1288 begin = end;
1289 covered = btrace_call_next (&end, context);
1290 }
afedecd3
MM
1291 }
1292
23a7fe75 1293 if (covered > 0)
8710b709 1294 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1295 else
1296 {
1297 if (size < 0)
1298 printf_unfiltered (_("At the start of the branch trace record.\n"));
1299 else
1300 printf_unfiltered (_("At the end of the branch trace record.\n"));
1301 }
afedecd3 1302
23a7fe75 1303 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1304}
1305
f6ac5f3d 1306/* The call_history_range method of target record-btrace. */
afedecd3 1307
f6ac5f3d
PA
1308void
1309record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1310 record_print_flags flags)
afedecd3
MM
1311{
1312 struct btrace_thread_info *btinfo;
23a7fe75 1313 struct btrace_call_iterator begin, end;
afedecd3 1314 struct ui_out *uiout;
23a7fe75
MM
1315 unsigned int low, high;
1316 int found;
afedecd3
MM
1317
1318 uiout = current_uiout;
2e783024 1319 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1320 low = from;
1321 high = to;
afedecd3 1322
0cb7c7b0 1323 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1324
1325 /* Check for wrap-arounds. */
23a7fe75 1326 if (low != from || high != to)
afedecd3
MM
1327 error (_("Bad range."));
1328
0688d04e 1329 if (high < low)
afedecd3
MM
1330 error (_("Bad range."));
1331
23a7fe75 1332 btinfo = require_btrace ();
afedecd3 1333
23a7fe75
MM
1334 found = btrace_find_call_by_number (&begin, btinfo, low);
1335 if (found == 0)
1336 error (_("Range out of bounds."));
afedecd3 1337
23a7fe75
MM
1338 found = btrace_find_call_by_number (&end, btinfo, high);
1339 if (found == 0)
0688d04e
MM
1340 {
1341 /* Silently truncate the range. */
1342 btrace_call_end (&end, btinfo);
1343 }
1344 else
1345 {
1346 /* We want both begin and end to be inclusive. */
1347 btrace_call_next (&end, 1);
1348 }
afedecd3 1349
8710b709 1350 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1351 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1352}
1353
f6ac5f3d 1354/* The call_history_from method of target record-btrace. */
afedecd3 1355
f6ac5f3d
PA
1356void
1357record_btrace_target::call_history_from (ULONGEST from, int size,
1358 record_print_flags flags)
afedecd3
MM
1359{
1360 ULONGEST begin, end, context;
1361
1362 context = abs (size);
0688d04e
MM
1363 if (context == 0)
1364 error (_("Bad record function-call-history-size."));
afedecd3
MM
1365
1366 if (size < 0)
1367 {
1368 end = from;
1369
1370 if (from < context)
1371 begin = 0;
1372 else
0688d04e 1373 begin = from - context + 1;
afedecd3
MM
1374 }
1375 else
1376 {
1377 begin = from;
0688d04e 1378 end = from + context - 1;
afedecd3
MM
1379
1380 /* Check for wrap-around. */
1381 if (end < begin)
1382 end = ULONGEST_MAX;
1383 }
1384
f6ac5f3d 1385 call_history_range ( begin, end, flags);
afedecd3
MM
1386}
1387
f6ac5f3d 1388/* The record_method method of target record-btrace. */
b158a20f 1389
f6ac5f3d
PA
1390enum record_method
1391record_btrace_target::record_method (ptid_t ptid)
b158a20f 1392{
5b6d1e4f
PA
1393 process_stratum_target *proc_target = current_inferior ()->process_target ();
1394 thread_info *const tp = find_thread_ptid (proc_target, ptid);
b158a20f
TW
1395
1396 if (tp == NULL)
1397 error (_("No thread."));
1398
1399 if (tp->btrace.target == NULL)
1400 return RECORD_METHOD_NONE;
1401
1402 return RECORD_METHOD_BTRACE;
1403}
1404
f6ac5f3d 1405/* The record_is_replaying method of target record-btrace. */
07bbe694 1406
57810aa7 1407bool
f6ac5f3d 1408record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1409{
5b6d1e4f
PA
1410 process_stratum_target *proc_target = current_inferior ()->process_target ();
1411 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331 1412 if (btrace_is_replaying (tp))
57810aa7 1413 return true;
07bbe694 1414
57810aa7 1415 return false;
07bbe694
MM
1416}
1417
f6ac5f3d 1418/* The record_will_replay method of target record-btrace. */
7ff27e9b 1419
57810aa7 1420bool
f6ac5f3d 1421record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1422{
f6ac5f3d 1423 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1424}
1425
f6ac5f3d 1426/* The xfer_partial method of target record-btrace. */
633785ff 1427
f6ac5f3d
PA
1428enum target_xfer_status
1429record_btrace_target::xfer_partial (enum target_object object,
1430 const char *annex, gdb_byte *readbuf,
1431 const gdb_byte *writebuf, ULONGEST offset,
1432 ULONGEST len, ULONGEST *xfered_len)
633785ff 1433{
633785ff 1434 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1435 if (replay_memory_access == replay_memory_access_read_only
aef92902 1436 && !record_btrace_generating_corefile
f6ac5f3d 1437 && record_is_replaying (inferior_ptid))
633785ff
MM
1438 {
1439 switch (object)
1440 {
1441 case TARGET_OBJECT_MEMORY:
1442 {
1443 struct target_section *section;
1444
1445 /* We do not allow writing memory in general. */
1446 if (writebuf != NULL)
9b409511
YQ
1447 {
1448 *xfered_len = len;
bc113b4e 1449 return TARGET_XFER_UNAVAILABLE;
9b409511 1450 }
633785ff
MM
1451
1452 /* We allow reading readonly memory. */
f6ac5f3d 1453 section = target_section_by_addr (this, offset);
633785ff
MM
1454 if (section != NULL)
1455 {
1456 /* Check if the section we found is readonly. */
fd361982 1457 if ((bfd_section_flags (section->the_bfd_section)
633785ff
MM
1458 & SEC_READONLY) != 0)
1459 {
1460 /* Truncate the request to fit into this section. */
325fac50 1461 len = std::min (len, section->endaddr - offset);
633785ff
MM
1462 break;
1463 }
1464 }
1465
9b409511 1466 *xfered_len = len;
bc113b4e 1467 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1468 }
1469 }
1470 }
1471
1472 /* Forward the request. */
b6a8c27b
PA
1473 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1474 offset, len, xfered_len);
633785ff
MM
1475}
1476
f6ac5f3d 1477/* The insert_breakpoint method of target record-btrace. */
633785ff 1478
f6ac5f3d
PA
1479int
1480record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1481 struct bp_target_info *bp_tgt)
633785ff 1482{
67b5c0c1
MM
1483 const char *old;
1484 int ret;
633785ff
MM
1485
1486 /* Inserting breakpoints requires accessing memory. Allow it for the
1487 duration of this function. */
67b5c0c1
MM
1488 old = replay_memory_access;
1489 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1490
1491 ret = 0;
a70b8144 1492 try
492d29ea 1493 {
b6a8c27b 1494 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1495 }
230d2906 1496 catch (const gdb_exception &except)
492d29ea 1497 {
6c63c96a 1498 replay_memory_access = old;
eedc3f4f 1499 throw;
492d29ea 1500 }
6c63c96a 1501 replay_memory_access = old;
633785ff
MM
1502
1503 return ret;
1504}
1505
f6ac5f3d 1506/* The remove_breakpoint method of target record-btrace. */
633785ff 1507
f6ac5f3d
PA
1508int
1509record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1510 struct bp_target_info *bp_tgt,
1511 enum remove_bp_reason reason)
633785ff 1512{
67b5c0c1
MM
1513 const char *old;
1514 int ret;
633785ff
MM
1515
1516 /* Removing breakpoints requires accessing memory. Allow it for the
1517 duration of this function. */
67b5c0c1
MM
1518 old = replay_memory_access;
1519 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1520
1521 ret = 0;
a70b8144 1522 try
492d29ea 1523 {
b6a8c27b 1524 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1525 }
230d2906 1526 catch (const gdb_exception &except)
492d29ea 1527 {
6c63c96a 1528 replay_memory_access = old;
eedc3f4f 1529 throw;
492d29ea 1530 }
6c63c96a 1531 replay_memory_access = old;
633785ff
MM
1532
1533 return ret;
1534}
1535
f6ac5f3d 1536/* The fetch_registers method of target record-btrace. */
1f3ef581 1537
f6ac5f3d
PA
1538void
1539record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581 1540{
1a476b6d
MM
1541 btrace_insn_iterator *replay = nullptr;
1542
1543 /* Thread-db may ask for a thread's registers before GDB knows about the
1544 thread. We forward the request to the target beneath in this
1545 case. */
5b6d1e4f 1546 thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
1a476b6d
MM
1547 if (tp != nullptr)
1548 replay = tp->btrace.replay;
1f3ef581 1549
1a476b6d 1550 if (replay != nullptr && !record_btrace_generating_corefile)
1f3ef581
MM
1551 {
1552 const struct btrace_insn *insn;
1553 struct gdbarch *gdbarch;
1554 int pcreg;
1555
ac7936df 1556 gdbarch = regcache->arch ();
1f3ef581
MM
1557 pcreg = gdbarch_pc_regnum (gdbarch);
1558 if (pcreg < 0)
1559 return;
1560
1561 /* We can only provide the PC register. */
1562 if (regno >= 0 && regno != pcreg)
1563 return;
1564
1565 insn = btrace_insn_get (replay);
1566 gdb_assert (insn != NULL);
1567
73e1c03f 1568 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1569 }
1570 else
b6a8c27b 1571 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1572}
1573
f6ac5f3d 1574/* The store_registers method of target record-btrace. */
1f3ef581 1575
f6ac5f3d
PA
1576void
1577record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1578{
a52eab48 1579 if (!record_btrace_generating_corefile
222312d3 1580 && record_is_replaying (regcache->ptid ()))
4d10e986 1581 error (_("Cannot write registers while replaying."));
1f3ef581 1582
491144b5 1583 gdb_assert (may_write_registers);
1f3ef581 1584
b6a8c27b 1585 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1586}
1587
f6ac5f3d 1588/* The prepare_to_store method of target record-btrace. */
1f3ef581 1589
f6ac5f3d
PA
1590void
1591record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1592{
a52eab48 1593 if (!record_btrace_generating_corefile
222312d3 1594 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1595 return;
1596
b6a8c27b 1597 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1598}
1599
0b722aec
MM
1600/* The branch trace frame cache. */
1601
1602struct btrace_frame_cache
1603{
1604 /* The thread. */
1605 struct thread_info *tp;
1606
1607 /* The frame info. */
1608 struct frame_info *frame;
1609
1610 /* The branch trace function segment. */
1611 const struct btrace_function *bfun;
1612};
1613
1614/* A struct btrace_frame_cache hash table indexed by NEXT. */
1615
1616static htab_t bfcache;
1617
1618/* hash_f for htab_create_alloc of bfcache. */
1619
1620static hashval_t
1621bfcache_hash (const void *arg)
1622{
19ba03f4
SM
1623 const struct btrace_frame_cache *cache
1624 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1625
1626 return htab_hash_pointer (cache->frame);
1627}
1628
1629/* eq_f for htab_create_alloc of bfcache. */
1630
1631static int
1632bfcache_eq (const void *arg1, const void *arg2)
1633{
19ba03f4
SM
1634 const struct btrace_frame_cache *cache1
1635 = (const struct btrace_frame_cache *) arg1;
1636 const struct btrace_frame_cache *cache2
1637 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1638
1639 return cache1->frame == cache2->frame;
1640}
1641
1642/* Create a new btrace frame cache. */
1643
1644static struct btrace_frame_cache *
1645bfcache_new (struct frame_info *frame)
1646{
1647 struct btrace_frame_cache *cache;
1648 void **slot;
1649
1650 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1651 cache->frame = frame;
1652
1653 slot = htab_find_slot (bfcache, cache, INSERT);
1654 gdb_assert (*slot == NULL);
1655 *slot = cache;
1656
1657 return cache;
1658}
1659
1660/* Extract the branch trace function from a branch trace frame. */
1661
1662static const struct btrace_function *
1663btrace_get_frame_function (struct frame_info *frame)
1664{
1665 const struct btrace_frame_cache *cache;
0b722aec
MM
1666 struct btrace_frame_cache pattern;
1667 void **slot;
1668
1669 pattern.frame = frame;
1670
1671 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1672 if (slot == NULL)
1673 return NULL;
1674
19ba03f4 1675 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1676 return cache->bfun;
1677}
1678
cecac1ab
MM
1679/* Implement stop_reason method for record_btrace_frame_unwind. */
1680
1681static enum unwind_stop_reason
1682record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1683 void **this_cache)
1684{
0b722aec
MM
1685 const struct btrace_frame_cache *cache;
1686 const struct btrace_function *bfun;
1687
19ba03f4 1688 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1689 bfun = cache->bfun;
1690 gdb_assert (bfun != NULL);
1691
42bfe59e 1692 if (bfun->up == 0)
0b722aec
MM
1693 return UNWIND_UNAVAILABLE;
1694
1695 return UNWIND_NO_REASON;
cecac1ab
MM
1696}
1697
1698/* Implement this_id method for record_btrace_frame_unwind. */
1699
1700static void
1701record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1702 struct frame_id *this_id)
1703{
0b722aec
MM
1704 const struct btrace_frame_cache *cache;
1705 const struct btrace_function *bfun;
4aeb0dfc 1706 struct btrace_call_iterator it;
0b722aec
MM
1707 CORE_ADDR code, special;
1708
19ba03f4 1709 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1710
1711 bfun = cache->bfun;
1712 gdb_assert (bfun != NULL);
1713
4aeb0dfc
TW
1714 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1715 bfun = btrace_call_get (&it);
0b722aec
MM
1716
1717 code = get_frame_func (this_frame);
1718 special = bfun->number;
1719
1720 *this_id = frame_id_build_unavailable_stack_special (code, special);
1721
1722 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1723 btrace_get_bfun_name (cache->bfun),
1724 core_addr_to_string_nz (this_id->code_addr),
1725 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1726}
1727
1728/* Implement prev_register method for record_btrace_frame_unwind. */
1729
1730static struct value *
1731record_btrace_frame_prev_register (struct frame_info *this_frame,
1732 void **this_cache,
1733 int regnum)
1734{
0b722aec
MM
1735 const struct btrace_frame_cache *cache;
1736 const struct btrace_function *bfun, *caller;
42bfe59e 1737 struct btrace_call_iterator it;
0b722aec
MM
1738 struct gdbarch *gdbarch;
1739 CORE_ADDR pc;
1740 int pcreg;
1741
1742 gdbarch = get_frame_arch (this_frame);
1743 pcreg = gdbarch_pc_regnum (gdbarch);
1744 if (pcreg < 0 || regnum != pcreg)
1745 throw_error (NOT_AVAILABLE_ERROR,
1746 _("Registers are not available in btrace record history"));
1747
19ba03f4 1748 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1749 bfun = cache->bfun;
1750 gdb_assert (bfun != NULL);
1751
42bfe59e 1752 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1753 throw_error (NOT_AVAILABLE_ERROR,
1754 _("No caller in btrace record history"));
1755
42bfe59e
TW
1756 caller = btrace_call_get (&it);
1757
0b722aec 1758 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1759 pc = caller->insn.front ().pc;
0b722aec
MM
1760 else
1761 {
0860c437 1762 pc = caller->insn.back ().pc;
0b722aec
MM
1763 pc += gdb_insn_length (gdbarch, pc);
1764 }
1765
1766 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1767 btrace_get_bfun_name (bfun), bfun->level,
1768 core_addr_to_string_nz (pc));
1769
1770 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1771}
1772
1773/* Implement sniffer method for record_btrace_frame_unwind. */
1774
1775static int
1776record_btrace_frame_sniffer (const struct frame_unwind *self,
1777 struct frame_info *this_frame,
1778 void **this_cache)
1779{
0b722aec
MM
1780 const struct btrace_function *bfun;
1781 struct btrace_frame_cache *cache;
cecac1ab 1782 struct thread_info *tp;
0b722aec 1783 struct frame_info *next;
cecac1ab
MM
1784
1785 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1786 tp = inferior_thread ();
cecac1ab 1787
0b722aec
MM
1788 bfun = NULL;
1789 next = get_next_frame (this_frame);
1790 if (next == NULL)
1791 {
1792 const struct btrace_insn_iterator *replay;
1793
1794 replay = tp->btrace.replay;
1795 if (replay != NULL)
08c3f6d2 1796 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1797 }
1798 else
1799 {
1800 const struct btrace_function *callee;
42bfe59e 1801 struct btrace_call_iterator it;
0b722aec
MM
1802
1803 callee = btrace_get_frame_function (next);
42bfe59e
TW
1804 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1805 return 0;
1806
1807 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1808 return 0;
1809
1810 bfun = btrace_call_get (&it);
0b722aec
MM
1811 }
1812
1813 if (bfun == NULL)
1814 return 0;
1815
1816 DEBUG ("[frame] sniffed frame for %s on level %d",
1817 btrace_get_bfun_name (bfun), bfun->level);
1818
1819 /* This is our frame. Initialize the frame cache. */
1820 cache = bfcache_new (this_frame);
1821 cache->tp = tp;
1822 cache->bfun = bfun;
1823
1824 *this_cache = cache;
1825 return 1;
1826}
1827
1828/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1829
1830static int
1831record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1832 struct frame_info *this_frame,
1833 void **this_cache)
1834{
1835 const struct btrace_function *bfun, *callee;
1836 struct btrace_frame_cache *cache;
42bfe59e 1837 struct btrace_call_iterator it;
0b722aec 1838 struct frame_info *next;
42bfe59e 1839 struct thread_info *tinfo;
0b722aec
MM
1840
1841 next = get_next_frame (this_frame);
1842 if (next == NULL)
1843 return 0;
1844
1845 callee = btrace_get_frame_function (next);
1846 if (callee == NULL)
1847 return 0;
1848
1849 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1850 return 0;
1851
00431a78 1852 tinfo = inferior_thread ();
42bfe59e 1853 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1854 return 0;
1855
42bfe59e
TW
1856 bfun = btrace_call_get (&it);
1857
0b722aec
MM
1858 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1859 btrace_get_bfun_name (bfun), bfun->level);
1860
1861 /* This is our frame. Initialize the frame cache. */
1862 cache = bfcache_new (this_frame);
42bfe59e 1863 cache->tp = tinfo;
0b722aec
MM
1864 cache->bfun = bfun;
1865
1866 *this_cache = cache;
1867 return 1;
1868}
1869
1870static void
1871record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1872{
1873 struct btrace_frame_cache *cache;
1874 void **slot;
1875
19ba03f4 1876 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1877
1878 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1879 gdb_assert (slot != NULL);
1880
1881 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1882}
1883
1884/* btrace recording does not store previous memory content, nor the stack
1885 frames' content.  Any unwinding would return erroneous results as the stack
1886 contents no longer match the changed PC value restored from history.
1887 Therefore this unwinder reports any possibly unwound registers as
1888 <unavailable>. */
1889
0b722aec 1890const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1891{
1892 NORMAL_FRAME,
1893 record_btrace_frame_unwind_stop_reason,
1894 record_btrace_frame_this_id,
1895 record_btrace_frame_prev_register,
1896 NULL,
0b722aec
MM
1897 record_btrace_frame_sniffer,
1898 record_btrace_frame_dealloc_cache
1899};
1900
1901const struct frame_unwind record_btrace_tailcall_frame_unwind =
1902{
1903 TAILCALL_FRAME,
1904 record_btrace_frame_unwind_stop_reason,
1905 record_btrace_frame_this_id,
1906 record_btrace_frame_prev_register,
1907 NULL,
1908 record_btrace_tailcall_frame_sniffer,
1909 record_btrace_frame_dealloc_cache
cecac1ab 1910};
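/* Both unwinders are handed to the frame machinery through the
   get_unwinder and get_tailcall_unwinder methods below.  */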
b2f4cfde 1911
f6ac5f3d 1912/* Implement the get_unwinder method. */
ac01945b 1913
f6ac5f3d
PA
1914const struct frame_unwind *
1915record_btrace_target::get_unwinder ()
ac01945b
TT
1916{
1917 return &record_btrace_frame_unwind;
1918}
1919
f6ac5f3d 1920/* Implement the get_tailcall_unwinder method. */
ac01945b 1921
f6ac5f3d
PA
1922const struct frame_unwind *
1923record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1924{
1925 return &record_btrace_tailcall_frame_unwind;
1926}
1927
987e68b1
MM
1928/* Return a human-readable string for FLAG. */
1929
1930static const char *
04902b09 1931btrace_thread_flag_to_str (btrace_thread_flags flag)
987e68b1
MM
1932{
1933 switch (flag)
1934 {
1935 case BTHR_STEP:
1936 return "step";
1937
1938 case BTHR_RSTEP:
1939 return "reverse-step";
1940
1941 case BTHR_CONT:
1942 return "cont";
1943
1944 case BTHR_RCONT:
1945 return "reverse-cont";
1946
1947 case BTHR_STOP:
1948 return "stop";
1949 }
1950
1951 return "<invalid>";
1952}
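/* The callers below pass a single BTHR_* bit; a combined value would fall
   through to the default case and show up as "<invalid>" in the debug
   messages.  */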
1953
52834460
MM
1954/* Indicate that TP should be resumed according to FLAG. */
1955
1956static void
1957record_btrace_resume_thread (struct thread_info *tp,
1958 enum btrace_thread_flag flag)
1959{
1960 struct btrace_thread_info *btinfo;
1961
43792cf0 1962 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d
TT
1963 target_pid_to_str (tp->ptid).c_str (), flag,
1964 btrace_thread_flag_to_str (flag));
52834460
MM
1965
1966 btinfo = &tp->btrace;
1967
52834460 1968 /* Fetch the latest branch trace. */
4a4495d6 1969 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1970
0ca912df
MM
1971 /* A resume request overwrites a preceding resume or stop request. */
1972 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1973 btinfo->flags |= flag;
1974}
1975
ec71cc2f
MM
1976/* Get the current frame for TP. */
1977
79b8d3b0
TT
1978static struct frame_id
1979get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1980{
79b8d3b0 1981 struct frame_id id;
719546c4 1982 bool executing;
ec71cc2f 1983
00431a78
PA
1984 /* Set current thread, which is implicitly used by
1985 get_current_frame. */
1986 scoped_restore_current_thread restore_thread;
1987
1988 switch_to_thread (tp);
ec71cc2f 1989
5b6d1e4f
PA
1990 process_stratum_target *proc_target = tp->inf->process_target ();
1991
ec71cc2f
MM
1992 /* Clear the executing flag to allow changes to the current frame.
1993 We are not actually running, yet. We just started a reverse execution
1994 command or a record goto command.
1995 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 1996 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f 1997 move the thread. Since we need to recompute the stack, we temporarily
85102364 1998 set EXECUTING to false. */
00431a78 1999 executing = tp->executing;
5b6d1e4f 2000 set_executing (proc_target, inferior_ptid, false);
ec71cc2f 2001
79b8d3b0 2002 id = null_frame_id;
a70b8144 2003 try
ec71cc2f 2004 {
79b8d3b0 2005 id = get_frame_id (get_current_frame ());
ec71cc2f 2006 }
230d2906 2007 catch (const gdb_exception &except)
ec71cc2f
MM
2008 {
2009 /* Restore the previous execution state. */
5b6d1e4f 2010 set_executing (proc_target, inferior_ptid, executing);
ec71cc2f 2011
eedc3f4f 2012 throw;
ec71cc2f 2013 }
ec71cc2f
MM
2014
2015 /* Restore the previous execution state. */
5b6d1e4f 2016 set_executing (proc_target, inferior_ptid, executing);
ec71cc2f 2017
79b8d3b0 2018 return id;
ec71cc2f
MM
2019}
2020
52834460
MM
2021/* Start replaying a thread. */
2022
2023static struct btrace_insn_iterator *
2024record_btrace_start_replaying (struct thread_info *tp)
2025{
52834460
MM
2026 struct btrace_insn_iterator *replay;
2027 struct btrace_thread_info *btinfo;
52834460
MM
2028
2029 btinfo = &tp->btrace;
2030 replay = NULL;
2031
2032 /* We can't start replaying without trace. */
b54b03bd 2033 if (btinfo->functions.empty ())
52834460
MM
2034 return NULL;
2035
52834460
MM
2036 /* GDB stores the current frame_id when stepping in order to detect steps
2037 into subroutines.
2038 Since frames are computed differently when we're replaying, we need to
2039 recompute those stored frames and fix them up so we can still detect
2040 subroutines after we started replaying. */
a70b8144 2041 try
52834460 2042 {
52834460
MM
2043 struct frame_id frame_id;
2044 int upd_step_frame_id, upd_step_stack_frame_id;
2045
2046 /* The current frame without replaying - computed via normal unwind. */
79b8d3b0 2047 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2048
2049 /* Check if we need to update any stepping-related frame id's. */
2050 upd_step_frame_id = frame_id_eq (frame_id,
2051 tp->control.step_frame_id);
2052 upd_step_stack_frame_id = frame_id_eq (frame_id,
2053 tp->control.step_stack_frame_id);
2054
2055 /* We start replaying at the end of the branch trace. This corresponds
2056 to the current instruction. */
8d749320 2057 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2058 btrace_insn_end (replay, btinfo);
2059
31fd9caa
MM
2060 /* Skip gaps at the end of the trace. */
2061 while (btrace_insn_get (replay) == NULL)
2062 {
2063 unsigned int steps;
2064
2065 steps = btrace_insn_prev (replay, 1);
2066 if (steps == 0)
2067 error (_("No trace."));
2068 }
2069
52834460
MM
2070 /* We're not replaying, yet. */
2071 gdb_assert (btinfo->replay == NULL);
2072 btinfo->replay = replay;
2073
2074 /* Make sure we're not using any stale registers. */
00431a78 2075 registers_changed_thread (tp);
52834460
MM
2076
2077 /* The current frame with replaying - computed via btrace unwind. */
79b8d3b0 2078 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2079
2080 /* Replace stepping related frames where necessary. */
2081 if (upd_step_frame_id)
2082 tp->control.step_frame_id = frame_id;
2083 if (upd_step_stack_frame_id)
2084 tp->control.step_stack_frame_id = frame_id;
2085 }
230d2906 2086 catch (const gdb_exception &except)
52834460
MM
2087 {
2088 xfree (btinfo->replay);
2089 btinfo->replay = NULL;
2090
00431a78 2091 registers_changed_thread (tp);
52834460 2092
eedc3f4f 2093 throw;
52834460
MM
2094 }
2095
2096 return replay;
2097}
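/* The iterator allocated with XNEW in record_btrace_start_replaying is owned
   by BTINFO->replay and is released with xfree in
   record_btrace_stop_replaying below.  */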
2098
2099/* Stop replaying a thread. */
2100
2101static void
2102record_btrace_stop_replaying (struct thread_info *tp)
2103{
2104 struct btrace_thread_info *btinfo;
2105
2106 btinfo = &tp->btrace;
2107
2108 xfree (btinfo->replay);
2109 btinfo->replay = NULL;
2110
2111 /* Make sure we're not leaving any stale registers. */
00431a78 2112 registers_changed_thread (tp);
52834460
MM
2113}
2114
e3cfc1c7
MM
2115/* Stop replaying TP if it is at the end of its execution history. */
2116
2117static void
2118record_btrace_stop_replaying_at_end (struct thread_info *tp)
2119{
2120 struct btrace_insn_iterator *replay, end;
2121 struct btrace_thread_info *btinfo;
2122
2123 btinfo = &tp->btrace;
2124 replay = btinfo->replay;
2125
2126 if (replay == NULL)
2127 return;
2128
2129 btrace_insn_end (&end, btinfo);
2130
2131 if (btrace_insn_cmp (replay, &end) == 0)
2132 record_btrace_stop_replaying (tp);
2133}
2134
f6ac5f3d 2135/* The resume method of target record-btrace. */
b2f4cfde 2136
f6ac5f3d
PA
2137void
2138record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2139{
d2939ba2 2140 enum btrace_thread_flag flag, cflag;
52834460 2141
a068643d 2142 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
f6ac5f3d 2143 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2144 step ? "step" : "cont");
52834460 2145
0ca912df
MM
2146 /* Store the execution direction of the last resume.
2147
f6ac5f3d 2148 If there is more than one resume call, we have to rely on infrun
0ca912df 2149 to not change the execution direction in-between. */
f6ac5f3d 2150 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2151
0ca912df 2152 /* As long as we're not replaying, just forward the request.
52834460 2153
0ca912df
MM
2154 For non-stop targets this means that no thread is replaying. In order to
2155 make progress, we may need to explicitly move replaying threads to the end
2156 of their execution history. */
f6ac5f3d
PA
2157 if ((::execution_direction != EXEC_REVERSE)
2158 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2159 {
b6a8c27b 2160 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2161 return;
b2f4cfde
MM
2162 }
2163
52834460 2164 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2165 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2166 {
2167 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2168 cflag = BTHR_RCONT;
2169 }
52834460 2170 else
d2939ba2
MM
2171 {
2172 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2173 cflag = BTHR_CONT;
2174 }
52834460 2175
52834460 2176 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2177 record_btrace_wait below.
2178
2179 For all-stop targets, we only step INFERIOR_PTID and continue others. */
5b6d1e4f
PA
2180
2181 process_stratum_target *proc_target = current_inferior ()->process_target ();
2182
d2939ba2
MM
2183 if (!target_is_non_stop_p ())
2184 {
26a57c92 2185 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2186
5b6d1e4f 2187 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2188 {
2189 if (tp->ptid.matches (inferior_ptid))
2190 record_btrace_resume_thread (tp, flag);
2191 else
2192 record_btrace_resume_thread (tp, cflag);
2193 }
d2939ba2
MM
2194 }
2195 else
2196 {
5b6d1e4f 2197 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331 2198 record_btrace_resume_thread (tp, flag);
d2939ba2 2199 }
70ad5bff
MM
2200
2201 /* Async support. */
2202 if (target_can_async_p ())
2203 {
6a3753b3 2204 target_async (1);
70ad5bff
MM
2205 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2206 }
52834460
MM
2207}
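/* For illustration: a forward "step" request results in flag == BTHR_STEP
   with cflag == BTHR_CONT, a "reverse-step" in BTHR_RSTEP with BTHR_RCONT.
   The flags are only consumed later, by record_btrace_step_thread, which is
   driven from the wait method below.  */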
2208
f6ac5f3d 2209/* The commit_resume method of target record-btrace. */
85ad3aaf 2210
f6ac5f3d
PA
2211void
2212record_btrace_target::commit_resume ()
85ad3aaf 2213{
f6ac5f3d
PA
2214 if ((::execution_direction != EXEC_REVERSE)
2215 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2216 beneath ()->commit_resume ();
85ad3aaf
PA
2217}
2218
987e68b1
MM
2219/* Cancel resuming TP. */
2220
2221static void
2222record_btrace_cancel_resume (struct thread_info *tp)
2223{
04902b09 2224 btrace_thread_flags flags;
987e68b1
MM
2225
2226 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2227 if (flags == 0)
2228 return;
2229
43792cf0
PA
2230 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2231 print_thread_id (tp),
04902b09 2232 target_pid_to_str (tp->ptid).c_str (), flags.raw (),
987e68b1
MM
2233 btrace_thread_flag_to_str (flags));
2234
2235 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2236 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2237}
2238
2239/* Return a target_waitstatus indicating that we ran out of history. */
2240
2241static struct target_waitstatus
2242btrace_step_no_history (void)
2243{
2244 struct target_waitstatus status;
2245
2246 status.kind = TARGET_WAITKIND_NO_HISTORY;
2247
2248 return status;
2249}
2250
2251/* Return a target_waitstatus indicating that a step finished. */
2252
2253static struct target_waitstatus
2254btrace_step_stopped (void)
2255{
2256 struct target_waitstatus status;
2257
2258 status.kind = TARGET_WAITKIND_STOPPED;
2259 status.value.sig = GDB_SIGNAL_TRAP;
2260
2261 return status;
2262}
2263
6e4879f0
MM
2264/* Return a target_waitstatus indicating that a thread was stopped as
2265 requested. */
2266
2267static struct target_waitstatus
2268btrace_step_stopped_on_request (void)
2269{
2270 struct target_waitstatus status;
2271
2272 status.kind = TARGET_WAITKIND_STOPPED;
2273 status.value.sig = GDB_SIGNAL_0;
2274
2275 return status;
2276}
2277
d825d248
MM
2278/* Return a target_waitstatus indicating a spurious stop. */
2279
2280static struct target_waitstatus
2281btrace_step_spurious (void)
2282{
2283 struct target_waitstatus status;
2284
2285 status.kind = TARGET_WAITKIND_SPURIOUS;
2286
2287 return status;
2288}
2289
e3cfc1c7
MM
2290/* Return a target_waitstatus indicating that the thread was not resumed. */
2291
2292static struct target_waitstatus
2293btrace_step_no_resumed (void)
2294{
2295 struct target_waitstatus status;
2296
2297 status.kind = TARGET_WAITKIND_NO_RESUMED;
2298
2299 return status;
2300}
2301
2302/* Return a target_waitstatus indicating that we should wait again. */
2303
2304static struct target_waitstatus
2305btrace_step_again (void)
2306{
2307 struct target_waitstatus status;
2308
2309 status.kind = TARGET_WAITKIND_IGNORE;
2310
2311 return status;
2312}
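/* The btrace_step_* helpers above only construct the target_waitstatus
   values returned by record_btrace_step_thread and the wait method below.
   TARGET_WAITKIND_IGNORE in particular means "nothing to report yet, keep
   the thread moving".  */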
2313
52834460
MM
2314/* Clear the record histories. */
2315
2316static void
2317record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2318{
2319 xfree (btinfo->insn_history);
2320 xfree (btinfo->call_history);
2321
2322 btinfo->insn_history = NULL;
2323 btinfo->call_history = NULL;
2324}
2325
3c615f99
MM
2326/* Check whether TP's current replay position is at a breakpoint. */
2327
2328static int
2329record_btrace_replay_at_breakpoint (struct thread_info *tp)
2330{
2331 struct btrace_insn_iterator *replay;
2332 struct btrace_thread_info *btinfo;
2333 const struct btrace_insn *insn;
3c615f99
MM
2334
2335 btinfo = &tp->btrace;
2336 replay = btinfo->replay;
2337
2338 if (replay == NULL)
2339 return 0;
2340
2341 insn = btrace_insn_get (replay);
2342 if (insn == NULL)
2343 return 0;
2344
00431a78 2345 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2346 &btinfo->stop_reason);
2347}
2348
d825d248 2349/* Step one instruction in forward direction. */
52834460
MM
2350
2351static struct target_waitstatus
d825d248 2352record_btrace_single_step_forward (struct thread_info *tp)
52834460 2353{
b61ce85c 2354 struct btrace_insn_iterator *replay, end, start;
52834460 2355 struct btrace_thread_info *btinfo;
52834460 2356
d825d248
MM
2357 btinfo = &tp->btrace;
2358 replay = btinfo->replay;
2359
2360 /* We're done if we're not replaying. */
2361 if (replay == NULL)
2362 return btrace_step_no_history ();
2363
011c71b6
MM
2364 /* Check if we're stepping a breakpoint. */
2365 if (record_btrace_replay_at_breakpoint (tp))
2366 return btrace_step_stopped ();
2367
b61ce85c
MM
2368 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2369 jump back to the instruction at which we started. */
2370 start = *replay;
d825d248
MM
2371 do
2372 {
2373 unsigned int steps;
2374
e3cfc1c7
MM
2375 /* We will bail out here if we continue stepping after reaching the end
2376 of the execution history. */
d825d248
MM
2377 steps = btrace_insn_next (replay, 1);
2378 if (steps == 0)
b61ce85c
MM
2379 {
2380 *replay = start;
2381 return btrace_step_no_history ();
2382 }
d825d248
MM
2383 }
2384 while (btrace_insn_get (replay) == NULL);
2385
2386 /* Determine the end of the instruction trace. */
2387 btrace_insn_end (&end, btinfo);
2388
e3cfc1c7
MM
2389 /* The execution trace contains (and ends with) the current instruction.
2390 This instruction has not been executed, yet, so the trace really ends
2391 one instruction earlier. */
d825d248 2392 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2393 return btrace_step_no_history ();
d825d248
MM
2394
2395 return btrace_step_spurious ();
2396}
2397
2398/* Step one instruction in backward direction. */
2399
2400static struct target_waitstatus
2401record_btrace_single_step_backward (struct thread_info *tp)
2402{
b61ce85c 2403 struct btrace_insn_iterator *replay, start;
d825d248 2404 struct btrace_thread_info *btinfo;
e59fa00f 2405
52834460
MM
2406 btinfo = &tp->btrace;
2407 replay = btinfo->replay;
2408
d825d248
MM
2409 /* Start replaying if we're not already doing so. */
2410 if (replay == NULL)
2411 replay = record_btrace_start_replaying (tp);
2412
2413 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2414 Skip gaps during replay. If we end up at a gap (at the beginning of
2415 the trace), jump back to the instruction at which we started. */
2416 start = *replay;
d825d248
MM
2417 do
2418 {
2419 unsigned int steps;
2420
2421 steps = btrace_insn_prev (replay, 1);
2422 if (steps == 0)
b61ce85c
MM
2423 {
2424 *replay = start;
2425 return btrace_step_no_history ();
2426 }
d825d248
MM
2427 }
2428 while (btrace_insn_get (replay) == NULL);
2429
011c71b6
MM
2430 /* Check if we're stepping a breakpoint.
2431
2432 For reverse-stepping, this check is after the step. There is logic in
2433 infrun.c that handles reverse-stepping separately. See, for example,
2434 proceed and adjust_pc_after_break.
2435
2436 This code assumes that for reverse-stepping, PC points to the last
2437 de-executed instruction, whereas for forward-stepping PC points to the
2438 next to-be-executed instruction. */
2439 if (record_btrace_replay_at_breakpoint (tp))
2440 return btrace_step_stopped ();
2441
d825d248
MM
2442 return btrace_step_spurious ();
2443}
2444
2445/* Step a single thread. */
2446
2447static struct target_waitstatus
2448record_btrace_step_thread (struct thread_info *tp)
2449{
2450 struct btrace_thread_info *btinfo;
2451 struct target_waitstatus status;
04902b09 2452 btrace_thread_flags flags;
d825d248
MM
2453
2454 btinfo = &tp->btrace;
2455
6e4879f0
MM
2456 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2457 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2458
43792cf0 2459 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
04902b09 2460 target_pid_to_str (tp->ptid).c_str (), flags.raw (),
987e68b1 2461 btrace_thread_flag_to_str (flags));
52834460 2462
6e4879f0
MM
2463 /* We can't step without an execution history. */
2464 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2465 return btrace_step_no_history ();
2466
52834460
MM
2467 switch (flags)
2468 {
2469 default:
2470 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2471
6e4879f0
MM
2472 case BTHR_STOP:
2473 return btrace_step_stopped_on_request ();
2474
52834460 2475 case BTHR_STEP:
d825d248
MM
2476 status = record_btrace_single_step_forward (tp);
2477 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2478 break;
52834460
MM
2479
2480 return btrace_step_stopped ();
2481
2482 case BTHR_RSTEP:
d825d248
MM
2483 status = record_btrace_single_step_backward (tp);
2484 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2485 break;
52834460
MM
2486
2487 return btrace_step_stopped ();
2488
2489 case BTHR_CONT:
e3cfc1c7
MM
2490 status = record_btrace_single_step_forward (tp);
2491 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2492 break;
52834460 2493
e3cfc1c7
MM
2494 btinfo->flags |= flags;
2495 return btrace_step_again ();
52834460
MM
2496
2497 case BTHR_RCONT:
e3cfc1c7
MM
2498 status = record_btrace_single_step_backward (tp);
2499 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2500 break;
52834460 2501
e3cfc1c7
MM
2502 btinfo->flags |= flags;
2503 return btrace_step_again ();
2504 }
d825d248 2505
2506 /* We keep threads moving at the end of their execution history.  The wait
2507 method will stop the thread for which the event is reported. */
2508 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2509 btinfo->flags |= flags;
52834460 2510
e3cfc1c7 2511 return status;
b2f4cfde
MM
2512}
2513
a6b5be76
MM
2514/* Announce further events if necessary. */
2515
2516static void
53127008
SM
2517record_btrace_maybe_mark_async_event
2518 (const std::vector<thread_info *> &moving,
2519 const std::vector<thread_info *> &no_history)
a6b5be76 2520{
53127008
SM
2521 bool more_moving = !moving.empty ();
2522 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2523
2524 if (!more_moving && !more_no_history)
2525 return;
2526
2527 if (more_moving)
2528 DEBUG ("movers pending");
2529
2530 if (more_no_history)
2531 DEBUG ("no-history pending");
2532
2533 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2534}
2535
f6ac5f3d 2536/* The wait method of target record-btrace. */
b2f4cfde 2537
f6ac5f3d
PA
2538ptid_t
2539record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
b60cea74 2540 target_wait_flags options)
b2f4cfde 2541{
53127008
SM
2542 std::vector<thread_info *> moving;
2543 std::vector<thread_info *> no_history;
52834460 2544
b60cea74
TT
2545 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (),
2546 (unsigned) options);
52834460 2547
b2f4cfde 2548 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2549 if ((::execution_direction != EXEC_REVERSE)
2550 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2551 {
b6a8c27b 2552 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2553 }
2554
e3cfc1c7 2555 /* Keep a work list of moving threads. */
5b6d1e4f
PA
2556 process_stratum_target *proc_target = current_inferior ()->process_target ();
2557 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2558 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2559 moving.push_back (tp);
e3cfc1c7 2560
53127008 2561 if (moving.empty ())
52834460 2562 {
e3cfc1c7 2563 *status = btrace_step_no_resumed ();
52834460 2564
a068643d 2565 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
23fdd69e 2566 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2567
e3cfc1c7 2568 return null_ptid;
52834460
MM
2569 }
2570
e3cfc1c7
MM
2571 /* Step moving threads one by one, one step each, until either one thread
2572 reports an event or we run out of threads to step.
2573
2574 When stepping more than one thread, chances are that some threads reach
2575 the end of their execution history earlier than others. If we reported
2576 this immediately, all-stop on top of non-stop would stop all threads and
2577 resume the same threads next time. And we would report the same thread
2578 having reached the end of its execution history again.
2579
2580 In the worst case, this would starve the other threads.  But even if other
2581 threads were allowed to make progress, this would result in far too
2582 many intermediate stops.
2583
2584 We therefore delay the reporting of "no execution history" until we have
2585 nothing else to report. By this time, all threads should have moved to
2586 either the beginning or the end of their execution history. There will
2587 be a single user-visible stop. */
53127008
SM
2588 struct thread_info *eventing = NULL;
2589 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2590 {
53127008 2591 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2592 {
53127008
SM
2593 thread_info *tp = moving[ix];
2594
e3cfc1c7
MM
2595 *status = record_btrace_step_thread (tp);
2596
2597 switch (status->kind)
2598 {
2599 case TARGET_WAITKIND_IGNORE:
2600 ix++;
2601 break;
2602
2603 case TARGET_WAITKIND_NO_HISTORY:
53127008 2604 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2605 break;
2606
2607 default:
53127008 2608 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2609 break;
2610 }
2611 }
2612 }
2613
2614 if (eventing == NULL)
2615 {
2616 /* We started with at least one moving thread. This thread must have
2617 either stopped or reached the end of its execution history.
2618
2619 In the former case, EVENTING must not be NULL.
2620 In the latter case, NO_HISTORY must not be empty. */
53127008 2621 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2622
2623 /* We kept threads moving at the end of their execution history. Stop
2624 EVENTING now that we are going to report its stop. */
53127008 2625 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2626 eventing->btrace.flags &= ~BTHR_MOVE;
2627
2628 *status = btrace_step_no_history ();
2629 }
2630
2631 gdb_assert (eventing != NULL);
2632
2633 /* We kept threads replaying at the end of their execution history. Stop
2634 replaying EVENTING now that we are going to report its stop. */
2635 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2636
2637 /* Stop all other threads. */
5953356c 2638 if (!target_is_non_stop_p ())
53127008 2639 {
d89edf9b 2640 for (thread_info *tp : current_inferior ()->non_exited_threads ())
53127008
SM
2641 record_btrace_cancel_resume (tp);
2642 }
52834460 2643
a6b5be76
MM
2644 /* In async mode, we need to announce further events. */
2645 if (target_is_async_p ())
2646 record_btrace_maybe_mark_async_event (moving, no_history);
2647
52834460 2648 /* Start record histories anew from the current position. */
e3cfc1c7 2649 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2650
2651 /* We moved the replay position but did not update registers. */
00431a78 2652 registers_changed_thread (eventing);
e3cfc1c7 2653
43792cf0
PA
2654 DEBUG ("wait ended by thread %s (%s): %s",
2655 print_thread_id (eventing),
a068643d 2656 target_pid_to_str (eventing->ptid).c_str (),
23fdd69e 2657 target_waitstatus_to_string (status).c_str ());
52834460 2658
e3cfc1c7 2659 return eventing->ptid;
52834460
MM
2660}
2661
f6ac5f3d 2662/* The stop method of target record-btrace. */
6e4879f0 2663
f6ac5f3d
PA
2664void
2665record_btrace_target::stop (ptid_t ptid)
6e4879f0 2666{
a068643d 2667 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2668
2669 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2670 if ((::execution_direction != EXEC_REVERSE)
2671 && !record_is_replaying (minus_one_ptid))
6e4879f0 2672 {
b6a8c27b 2673 this->beneath ()->stop (ptid);
6e4879f0
MM
2674 }
2675 else
2676 {
5b6d1e4f
PA
2677 process_stratum_target *proc_target
2678 = current_inferior ()->process_target ();
2679
2680 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2681 {
2682 tp->btrace.flags &= ~BTHR_MOVE;
2683 tp->btrace.flags |= BTHR_STOP;
2684 }
6e4879f0
MM
2685 }
2686 }
2687
f6ac5f3d 2688/* The can_execute_reverse method of target record-btrace. */
52834460 2689
57810aa7 2690bool
f6ac5f3d 2691record_btrace_target::can_execute_reverse ()
52834460 2692{
57810aa7 2693 return true;
52834460
MM
2694}
2695
f6ac5f3d 2696/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2697
57810aa7 2698bool
f6ac5f3d 2699record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2700{
f6ac5f3d 2701 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2702 {
2703 struct thread_info *tp = inferior_thread ();
2704
2705 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2706 }
2707
b6a8c27b 2708 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2709}
2710
f6ac5f3d 2711/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2712 record-btrace. */
2713
57810aa7 2714bool
f6ac5f3d 2715record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2716{
f6ac5f3d 2717 if (record_is_replaying (minus_one_ptid))
57810aa7 2718 return true;
9e8915c6 2719
b6a8c27b 2720 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2721}
2722
2723/* The stopped_by_hw_breakpoint method of target record-btrace. */
9e8915c6 2724
57810aa7 2725bool
f6ac5f3d 2726record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2727{
f6ac5f3d 2728 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2729 {
2730 struct thread_info *tp = inferior_thread ();
2731
2732 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2733 }
2734
b6a8c27b 2735 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2736}
2737
f6ac5f3d 2738/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2739 record-btrace. */
2740
57810aa7 2741bool
f6ac5f3d 2742record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2743{
f6ac5f3d 2744 if (record_is_replaying (minus_one_ptid))
57810aa7 2745 return true;
52834460 2746
b6a8c27b 2747 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2748}
2749
f6ac5f3d 2750/* The update_thread_list method of target record-btrace. */
e2887aa3 2751
f6ac5f3d
PA
2752void
2753record_btrace_target::update_thread_list ()
e2887aa3 2754{
e8032dde 2755 /* We don't add or remove threads during replay. */
f6ac5f3d 2756 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2757 return;
2758
2759 /* Forward the request. */
b6a8c27b 2760 this->beneath ()->update_thread_list ();
e2887aa3
MM
2761}
2762
f6ac5f3d 2763/* The thread_alive method of target record-btrace. */
e2887aa3 2764
57810aa7 2765bool
f6ac5f3d 2766record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2767{
2768 /* We don't add or remove threads during replay. */
f6ac5f3d 2769 if (record_is_replaying (minus_one_ptid))
00431a78 2770 return true;
e2887aa3
MM
2771
2772 /* Forward the request. */
b6a8c27b 2773 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2774}
2775
066ce621
MM
2776/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2777 is stopped. */
2778
2779static void
2780record_btrace_set_replay (struct thread_info *tp,
2781 const struct btrace_insn_iterator *it)
2782{
2783 struct btrace_thread_info *btinfo;
2784
2785 btinfo = &tp->btrace;
2786
a0f1b963 2787 if (it == NULL)
52834460 2788 record_btrace_stop_replaying (tp);
066ce621
MM
2789 else
2790 {
2791 if (btinfo->replay == NULL)
52834460 2792 record_btrace_start_replaying (tp);
066ce621
MM
2793 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2794 return;
2795
2796 *btinfo->replay = *it;
00431a78 2797 registers_changed_thread (tp);
066ce621
MM
2798 }
2799
52834460
MM
2800 /* Start anew from the new replay position. */
2801 record_btrace_clear_histories (btinfo);
485668e5 2802
f2ffa92b
PA
2803 inferior_thread ()->suspend.stop_pc
2804 = regcache_read_pc (get_current_regcache ());
485668e5 2805 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2806}
2807
f6ac5f3d 2808/* The goto_record_begin method of target record-btrace. */
066ce621 2809
f6ac5f3d
PA
2810void
2811record_btrace_target::goto_record_begin ()
066ce621
MM
2812{
2813 struct thread_info *tp;
2814 struct btrace_insn_iterator begin;
2815
2816 tp = require_btrace_thread ();
2817
2818 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2819
2820 /* Skip gaps at the beginning of the trace. */
2821 while (btrace_insn_get (&begin) == NULL)
2822 {
2823 unsigned int steps;
2824
2825 steps = btrace_insn_next (&begin, 1);
2826 if (steps == 0)
2827 error (_("No trace."));
2828 }
2829
066ce621 2830 record_btrace_set_replay (tp, &begin);
066ce621
MM
2831}
2832
f6ac5f3d 2833/* The goto_record_end method of target record-btrace. */
066ce621 2834
f6ac5f3d
PA
2835void
2836record_btrace_target::goto_record_end ()
066ce621
MM
2837{
2838 struct thread_info *tp;
2839
2840 tp = require_btrace_thread ();
2841
2842 record_btrace_set_replay (tp, NULL);
066ce621
MM
2843}
2844
f6ac5f3d 2845/* The goto_record method of target record-btrace. */
066ce621 2846
f6ac5f3d
PA
2847void
2848record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2849{
2850 struct thread_info *tp;
2851 struct btrace_insn_iterator it;
2852 unsigned int number;
2853 int found;
2854
2855 number = insn;
2856
2857 /* Check for wrap-arounds. */
2858 if (number != insn)
2859 error (_("Instruction number out of range."));
2860
2861 tp = require_btrace_thread ();
2862
2863 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2864
2865 /* Check if the instruction could not be found or is a gap. */
2866 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2867 error (_("No such instruction."));
2868
2869 record_btrace_set_replay (tp, &it);
066ce621
MM
2870}
2871
f6ac5f3d 2872/* The record_stop_replaying method of target record-btrace. */
797094dd 2873
f6ac5f3d
PA
2874void
2875record_btrace_target::record_stop_replaying ()
797094dd 2876{
d89edf9b 2877 for (thread_info *tp : current_inferior ()->non_exited_threads ())
797094dd
MM
2878 record_btrace_stop_replaying (tp);
2879}
2880
f6ac5f3d 2881/* The execution_direction target method. */
70ad5bff 2882
f6ac5f3d
PA
2883enum exec_direction_kind
2884record_btrace_target::execution_direction ()
70ad5bff
MM
2885{
2886 return record_btrace_resume_exec_dir;
2887}
2888
f6ac5f3d 2889/* The prepare_to_generate_core target method. */
aef92902 2890
f6ac5f3d
PA
2891void
2892record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2893{
2894 record_btrace_generating_corefile = 1;
2895}
2896
f6ac5f3d 2897/* The done_generating_core target method. */
aef92902 2898
f6ac5f3d
PA
2899void
2900record_btrace_target::done_generating_core ()
aef92902
MM
2901{
2902 record_btrace_generating_corefile = 0;
2903}
2904
f4abbc16
MM
2905/* Start recording in BTS format. */
2906
2907static void
cdb34d4a 2908cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2909{
f4abbc16
MM
2910 if (args != NULL && *args != 0)
2911 error (_("Invalid argument."));
2912
2913 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2914
a70b8144 2915 try
492d29ea 2916 {
95a6b0a1 2917 execute_command ("target record-btrace", from_tty);
492d29ea 2918 }
230d2906 2919 catch (const gdb_exception &exception)
f4abbc16
MM
2920 {
2921 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2922 throw;
f4abbc16
MM
2923 }
2924}
2925
bc504a31 2926/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2927
2928static void
cdb34d4a 2929cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2930{
2931 if (args != NULL && *args != 0)
2932 error (_("Invalid argument."));
2933
b20a6524 2934 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2935
a70b8144 2936 try
492d29ea 2937 {
95a6b0a1 2938 execute_command ("target record-btrace", from_tty);
492d29ea 2939 }
230d2906 2940 catch (const gdb_exception &exception)
492d29ea
PA
2941 {
2942 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2943 throw;
492d29ea 2944 }
afedecd3
MM
2945}
2946
b20a6524
MM
2947/* Alias for "target record". */
2948
2949static void
981a3fb3 2950cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2951{
2952 if (args != NULL && *args != 0)
2953 error (_("Invalid argument."));
2954
2955 record_btrace_conf.format = BTRACE_FORMAT_PT;
2956
a70b8144 2957 try
b20a6524 2958 {
95a6b0a1 2959 execute_command ("target record-btrace", from_tty);
b20a6524 2960 }
230d2906 2961 catch (const gdb_exception &exception)
b20a6524
MM
2962 {
2963 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2964
a70b8144 2965 try
b20a6524 2966 {
95a6b0a1 2967 execute_command ("target record-btrace", from_tty);
b20a6524 2968 }
230d2906 2969 catch (const gdb_exception &ex)
b20a6524
MM
2970 {
2971 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2972 throw;
b20a6524 2973 }
b20a6524 2974 }
b20a6524
MM
2975}
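/* Note that plain "record btrace" tries the Intel Processor Trace format
   first and falls back to BTS if opening the target fails; if that fails as
   well, the format is reset to none and the error is propagated.  */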
2976
67b5c0c1
MM
2977/* The "show record btrace replay-memory-access" command. */
2978
2979static void
2980cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2981 struct cmd_list_element *c, const char *value)
2982{
2983 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2984 replay_memory_access);
2985}
2986
4a4495d6
MM
2987/* The "set record btrace cpu none" command. */
2988
2989static void
2990cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2991{
2992 if (args != nullptr && *args != 0)
2993 error (_("Trailing junk: '%s'."), args);
2994
2995 record_btrace_cpu_state = CS_NONE;
2996}
2997
2998/* The "set record btrace cpu auto" command. */
2999
3000static void
3001cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
3002{
3003 if (args != nullptr && *args != 0)
3004 error (_("Trailing junk: '%s'."), args);
3005
3006 record_btrace_cpu_state = CS_AUTO;
3007}
3008
3009/* The "set record btrace cpu" command. */
3010
3011static void
3012cmd_set_record_btrace_cpu (const char *args, int from_tty)
3013{
3014 if (args == nullptr)
3015 args = "";
3016
3017 /* We use a hard-coded vendor string for now. */
3018 unsigned int family, model, stepping;
3019 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3020 &model, &l1, &stepping, &l2);
3021 if (matches == 3)
3022 {
3023 if (strlen (args) != l2)
3024 error (_("Trailing junk: '%s'."), args + l2);
3025 }
3026 else if (matches == 2)
3027 {
3028 if (strlen (args) != l1)
3029 error (_("Trailing junk: '%s'."), args + l1);
3030
3031 stepping = 0;
3032 }
3033 else
3034 error (_("Bad format. See \"help set record btrace cpu\"."));
3035
3036 if (USHRT_MAX < family)
3037 error (_("Cpu family too big."));
3038
3039 if (UCHAR_MAX < model)
3040 error (_("Cpu model too big."));
3041
3042 if (UCHAR_MAX < stepping)
3043 error (_("Cpu stepping too big."));
3044
3045 record_btrace_cpu.vendor = CV_INTEL;
3046 record_btrace_cpu.family = family;
3047 record_btrace_cpu.model = model;
3048 record_btrace_cpu.stepping = stepping;
3049
3050 record_btrace_cpu_state = CS_CPU;
3051}
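/* Example usage (the cpu numbers below are purely illustrative):

     (gdb) set record btrace cpu intel: 6/158     - family 6, model 158
     (gdb) set record btrace cpu intel: 6/158/9   - additionally stepping 9

   "auto" and "none" are implemented by the dedicated sub-commands
   cmd_set_record_btrace_cpu_auto and cmd_set_record_btrace_cpu_none
   above.  */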
3052
3053/* The "show record btrace cpu" command. */
3054
3055static void
3056cmd_show_record_btrace_cpu (const char *args, int from_tty)
3057{
4a4495d6
MM
3058 if (args != nullptr && *args != 0)
3059 error (_("Trailing junk: '%s'."), args);
3060
3061 switch (record_btrace_cpu_state)
3062 {
3063 case CS_AUTO:
3064 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3065 return;
3066
3067 case CS_NONE:
3068 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3069 return;
3070
3071 case CS_CPU:
3072 switch (record_btrace_cpu.vendor)
3073 {
3074 case CV_INTEL:
3075 if (record_btrace_cpu.stepping == 0)
3076 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3077 record_btrace_cpu.family,
3078 record_btrace_cpu.model);
3079 else
3080 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3081 record_btrace_cpu.family,
3082 record_btrace_cpu.model,
3083 record_btrace_cpu.stepping);
3084 return;
3085 }
3086 }
3087
3088 error (_("Internal error: bad cpu state."));
3089}
3090
b20a6524
MM
3091/* The "record bts buffer-size" show value function. */
3092
3093static void
3094show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3095 struct cmd_list_element *c,
3096 const char *value)
3097{
3098 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3099 value);
3100}
3101
3102/* The "record pt buffer-size" show value function. */
3103
3104static void
3105show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3106 struct cmd_list_element *c,
3107 const char *value)
3108{
3109 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3110 value);
3111}
3112
afedecd3
MM
3113/* Initialize btrace commands. */
3114
6c265988 3115void _initialize_record_btrace ();
afedecd3 3116void
6c265988 3117_initialize_record_btrace ()
afedecd3 3118{
f4abbc16
MM
3119 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3120 _("Start branch trace recording."), &record_btrace_cmdlist,
3121 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3122 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3123
f4abbc16
MM
3124 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3125 _("\
3126Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3127The processor stores a from/to record for each branch into a cyclic buffer.\n\
3128This format may not be available on all processors."),
3129 &record_btrace_cmdlist);
3130 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3131
b20a6524
MM
3132 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3133 _("\
bc504a31 3134Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3135This format may not be available on all processors."),
3136 &record_btrace_cmdlist);
3137 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3138
0743fc83
TT
3139 add_basic_prefix_cmd ("btrace", class_support,
3140 _("Set record options."), &set_record_btrace_cmdlist,
3141 "set record btrace ", 0, &set_record_cmdlist);
67b5c0c1 3142
0743fc83
TT
3143 add_show_prefix_cmd ("btrace", class_support,
3144 _("Show record options."), &show_record_btrace_cmdlist,
3145 "show record btrace ", 0, &show_record_cmdlist);
67b5c0c1
MM
3146
3147 add_setshow_enum_cmd ("replay-memory-access", no_class,
3148 replay_memory_access_types, &replay_memory_access, _("\
3149Set what memory accesses are allowed during replay."), _("\
3150Show what memory accesses are allowed during replay."),
3151 _("Default is READ-ONLY.\n\n\
3152The btrace record target does not trace data.\n\
3153The memory therefore corresponds to the live target and not \
3154to the current replay position.\n\n\
3155When READ-ONLY, allow accesses to read-only memory during replay.\n\
3156When READ-WRITE, allow accesses to read-only and read-write memory during \
3157replay."),
3158 NULL, cmd_show_replay_memory_access,
3159 &set_record_btrace_cmdlist,
3160 &show_record_btrace_cmdlist);
3161
4a4495d6
MM
3162 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3163 _("\
3164Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3165The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3166For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3167When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3168The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3169When GDB does not support that cpu, this option can be used to enable\n\
3170workarounds for a similar cpu that GDB supports.\n\n\
3171When set to \"none\", errata workarounds are disabled."),
3172 &set_record_btrace_cpu_cmdlist,
590042fc 3173 "set record btrace cpu ", 1,
4a4495d6
MM
3174 &set_record_btrace_cmdlist);
3175
3176 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3177Automatically determine the cpu to be used for trace decode."),
3178 &set_record_btrace_cpu_cmdlist);
3179
3180 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3181Do not enable errata workarounds for trace decode."),
3182 &set_record_btrace_cpu_cmdlist);
3183
3184 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3185Show the cpu to be used for trace decode."),
3186 &show_record_btrace_cmdlist);
3187
0743fc83
TT
3188 add_basic_prefix_cmd ("bts", class_support,
3189 _("Set record btrace bts options."),
3190 &set_record_btrace_bts_cmdlist,
3191 "set record btrace bts ", 0,
3192 &set_record_btrace_cmdlist);
d33501a5 3193
0743fc83
TT
3194 add_show_prefix_cmd ("bts", class_support,
3195 _("Show record btrace bts options."),
3196 &show_record_btrace_bts_cmdlist,
3197 "show record btrace bts ", 0,
3198 &show_record_btrace_cmdlist);
d33501a5
MM
3199
3200 add_setshow_uinteger_cmd ("buffer-size", no_class,
3201 &record_btrace_conf.bts.size,
3202 _("Set the record/replay bts buffer size."),
3203 _("Show the record/replay bts buffer size."), _("\
3204When starting recording, request a trace buffer of this size. \
3205The actual buffer size may differ from the requested size. \
3206Use \"info record\" to see the actual buffer size.\n\n\
3207Bigger buffers allow longer recording but also take more time to process \
3208the recorded execution trace.\n\n\
b20a6524
MM
3209The trace buffer size may not be changed while recording."), NULL,
3210 show_record_bts_buffer_size_value,
d33501a5
MM
3211 &set_record_btrace_bts_cmdlist,
3212 &show_record_btrace_bts_cmdlist);
3213
0743fc83
TT
3214 add_basic_prefix_cmd ("pt", class_support,
3215 _("Set record btrace pt options."),
3216 &set_record_btrace_pt_cmdlist,
3217 "set record btrace pt ", 0,
3218 &set_record_btrace_cmdlist);
3219
3220 add_show_prefix_cmd ("pt", class_support,
3221 _("Show record btrace pt options."),
3222 &show_record_btrace_pt_cmdlist,
3223 "show record btrace pt ", 0,
3224 &show_record_btrace_cmdlist);
b20a6524
MM
3225
3226 add_setshow_uinteger_cmd ("buffer-size", no_class,
3227 &record_btrace_conf.pt.size,
3228 _("Set the record/replay pt buffer size."),
3229 _("Show the record/replay pt buffer size."), _("\
3230Bigger buffers allow longer recording but also take more time to process \
3231the recorded execution.\n\
3232The actual buffer size may differ from the requested size. Use \"info record\" \
3233to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3234 &set_record_btrace_pt_cmdlist,
3235 &show_record_btrace_pt_cmdlist);
3236
d9f719f1 3237 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3238
3239 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3240 xcalloc, xfree);
d33501a5
MM
3241
3242 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3243 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3244}