/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2023 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observable.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "gdbsupport/event-loop.h"
#include "inf-loop.h"
#include "inferior.h"
#include <algorithm>
#include "gdbarch.h"
#include "cli/cli-style.h"
#include "async-event.h"
#include <forward_list>
#include "objfiles.h"
#include "interps.h"

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};

/* The target_ops of record-btrace.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (bool) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
                          gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
                           gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
                                        const char *annex,
                                        gdb_byte *readbuf,
                                        const gdb_byte *writebuf,
                                        ULONGEST offset, ULONGEST len,
                                        ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
                         struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
                         enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, target_wait_flags) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};

static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops.  */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
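
/* A typical CLI sequence (illustrative):

     (gdb) record btrace
     (gdb) set record btrace replay-memory-access read-write

   The second command switches REPLAY_MEMORY_ACCESS to
   replay_memory_access_read_write so that, while replaying, memory writes
   are forwarded to the target beneath instead of being rejected; see the
   xfer_partial method below.  */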

/* The cpu state kinds.  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        gdb_printf (gdb_stdlog, \
                    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
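
/* Example (taken from this file): with "set debug record 1" in effect,

     DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

   prints a line of the form "[record-btrace] insn-history (0x0): 10" to
   gdb_stdlog.  */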


/* Return the cpu configured by the user.  Returns NULL if the cpu was
   configured as auto.  */
const struct btrace_cpu *
record_btrace_get_cpu (void)
{
  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      return nullptr;

    case CS_NONE:
      record_btrace_cpu.vendor = CV_UNKNOWN;
      /* Fall through.  */
    case CS_CPU:
      return &record_btrace_cpu;
    }

  error (_("Internal error: bad record btrace cpu state."));
}
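
/* Illustration of the state mapping above: with the default
   "set record btrace cpu auto", this function returns nullptr and trace
   decode uses the cpu the trace was recorded on; after
   "set record btrace cpu none", it returns a btrace_cpu whose vendor is
   forced to CV_UNKNOWN, so no cpu-specific workarounds are applied.  */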

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* The new thread observer.  */

static void
record_btrace_on_new_thread (struct thread_info *tp)
{
  /* Ignore this thread if its inferior is not recorded by us.  */
  target_ops *rec = tp->inf->target_at (record_stratum);
  if (rec != &record_btrace_ops)
    return;

  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_on_new_thread,
                                     record_btrace_thread_observer_token,
                                     "record-btrace");
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  current_inferior ()->push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL, "record-btrace");
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  interps_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* Disable btrace on a set of threads on scope exit.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};
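
/* Typical use (mirroring record_btrace_target_open below): register each
   thread right after enabling tracing for it and discard the guard only
   once every thread succeeded, so a later failure disables the threads
   that were already enabled:

     scoped_btrace_disable btrace_disable;

     for (thread_info *tp : threads)
       {
         btrace_enable (tp, &record_btrace_conf);
         btrace_disable.add_thread (tp);
       }

     btrace_disable.discard ();  */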

/* Open target record-btrace.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution ())
    error (_("The program is not being run."));

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  btrace_disable.discard ();
}

/* The stop_recording method of target record-btrace.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
                                  int from_tty)
{
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  current_inferior ()->unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}

/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : current_inferior ()->non_exited_threads ())
    btrace_teardown (tp);
}

/* The async method of target record-btrace.  */

void
record_btrace_target::async (bool enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
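
/* Example (illustrative): a buffer size of 4194304 (4 << 20) is printed as
   "4MB" and 65536 as "64kB", while 1000, which is not a multiple of 1kB,
   is left unchanged and gets no suffix.  */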

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      gdb_printf (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  gdb_printf (_("Recording format: %s.\n"),
              btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (_("Unknown branch trace format."));
}

/* The info_record method of target record-btrace.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
         that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
        insns -= 1;

      gaps = btinfo->ngaps;
    }

  gdb_printf (_("Recorded %u instructions in %u functions (%u gaps) "
                "for thread %s (%s).\n"), insns, calls, gaps,
              print_thread_id (tp),
              target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    gdb_printf (_("Replay in progress.  At instruction %u.\n"),
                btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}
703/* Find the line range associated with PC. */
704
705static struct btrace_line_range
706btrace_find_line_range (CORE_ADDR pc)
707{
708 struct btrace_line_range range;
977a0c16
TT
709 const linetable_entry *lines;
710 const linetable *ltable;
f94cc897
MM
711 struct symtab *symtab;
712 int nlines, i;
713
714 symtab = find_pc_line_symtab (pc);
715 if (symtab == NULL)
716 return btrace_mk_line_range (NULL, 0, 0);
717
5b607461 718 ltable = symtab->linetable ();
f94cc897
MM
719 if (ltable == NULL)
720 return btrace_mk_line_range (symtab, 0, 0);
721
722 nlines = ltable->nitems;
723 lines = ltable->item;
724 if (nlines <= 0)
725 return btrace_mk_line_range (symtab, 0, 0);
726
1acc9dca 727 struct objfile *objfile = symtab->compunit ()->objfile ();
48e0f38c
TT
728 unrelocated_addr unrel_pc
729 = unrelocated_addr (pc - objfile->text_section_offset ());
1acc9dca 730
f94cc897
MM
731 range = btrace_mk_line_range (symtab, 0, 0);
732 for (i = 0; i < nlines - 1; i++)
733 {
8c95582d
AB
734 /* The test of is_stmt here was added when the is_stmt field was
735 introduced to the 'struct linetable_entry' structure. This
736 ensured that this loop maintained the same behaviour as before we
737 introduced is_stmt. That said, it might be that we would be
738 better off not checking is_stmt here, this would lead to us
739 possibly adding more line numbers to the range. At the time this
740 change was made I was unsure how to test this so chose to go with
741 maintaining the existing experience. */
0434c3ef 742 if (lines[i].unrelocated_pc () == unrel_pc && lines[i].line != 0
ddc6677b 743 && lines[i].is_stmt)
f94cc897
MM
744 range = btrace_line_range_add (range, lines[i].line);
745 }
746
747 return range;
748}
749
750/* Print source lines in LINES to UIOUT.
751
752 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
753 instructions corresponding to that source line. When printing a new source
754 line, we do the cleanups for the open chain and open a new cleanup chain for
755 the new source line. If the source line range in LINES is not empty, this
756 function will leave the cleanup chain for the last printed source line open
757 so instructions can be added to it. */
758
759static void
760btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
7ea78b59
SM
761 gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
762 gdb::optional<ui_out_emit_list> *asm_list,
763 gdb_disassembly_flags flags)
f94cc897 764{
8d297bbf 765 print_source_lines_flags psl_flags;
f94cc897 766
f94cc897
MM
767 if (flags & DISASSEMBLY_FILENAME)
768 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
769
7ea78b59 770 for (int line = lines.begin; line < lines.end; ++line)
f94cc897 771 {
7ea78b59 772 asm_list->reset ();
f94cc897 773
7ea78b59 774 src_and_asm_tuple->emplace (uiout, "src_and_asm_line");
f94cc897
MM
775
776 print_source_lines (lines.symtab, line, line + 1, psl_flags);
777
7ea78b59 778 asm_list->emplace (uiout, "line_asm_insn");
f94cc897
MM
779 }
780}
781
afedecd3
MM
782/* Disassemble a section of the recorded instruction trace. */
783
784static void
23a7fe75 785btrace_insn_history (struct ui_out *uiout,
31fd9caa 786 const struct btrace_thread_info *btinfo,
23a7fe75 787 const struct btrace_insn_iterator *begin,
9a24775b
PA
788 const struct btrace_insn_iterator *end,
789 gdb_disassembly_flags flags)
afedecd3 790{
9a24775b
PA
791 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
792 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 793
f94cc897
MM
794 flags |= DISASSEMBLY_SPECULATIVE;
795
99d9c3b9 796 gdbarch *gdbarch = current_inferior ()->arch ();
7ea78b59 797 btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);
f94cc897 798
7ea78b59 799 ui_out_emit_list list_emitter (uiout, "asm_insns");
f94cc897 800
7ea78b59
SM
801 gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
802 gdb::optional<ui_out_emit_list> asm_list;
afedecd3 803
046bebe1 804 gdb_pretty_print_disassembler disasm (gdbarch, uiout);
8b172ce7 805
7ea78b59 806 for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
dda83cd7 807 btrace_insn_next (&it, 1))
afedecd3 808 {
23a7fe75
MM
809 const struct btrace_insn *insn;
810
811 insn = btrace_insn_get (&it);
812
31fd9caa
MM
813 /* A NULL instruction indicates a gap in the trace. */
814 if (insn == NULL)
815 {
816 const struct btrace_config *conf;
817
818 conf = btrace_conf (btinfo);
afedecd3 819
31fd9caa
MM
820 /* We have trace so we must have a configuration. */
821 gdb_assert (conf != NULL);
822
69090cee
TW
823 uiout->field_fmt ("insn-number", "%u",
824 btrace_insn_number (&it));
825 uiout->text ("\t");
826
827 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
828 conf->format);
829 }
830 else
831 {
f94cc897 832 struct disasm_insn dinsn;
da8c46d2 833
f94cc897 834 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 835 {
f94cc897
MM
836 struct btrace_line_range lines;
837
838 lines = btrace_find_line_range (insn->pc);
839 if (!btrace_line_range_is_empty (lines)
840 && !btrace_line_range_contains_range (last_lines, lines))
841 {
7ea78b59
SM
842 btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
843 flags);
f94cc897
MM
844 last_lines = lines;
845 }
7ea78b59 846 else if (!src_and_asm_tuple.has_value ())
f94cc897 847 {
7ea78b59
SM
848 gdb_assert (!asm_list.has_value ());
849
850 src_and_asm_tuple.emplace (uiout, "src_and_asm_line");
851
f94cc897 852 /* No source information. */
7ea78b59 853 asm_list.emplace (uiout, "line_asm_insn");
f94cc897
MM
854 }
855
7ea78b59
SM
856 gdb_assert (src_and_asm_tuple.has_value ());
857 gdb_assert (asm_list.has_value ());
da8c46d2 858 }
da8c46d2 859
f94cc897
MM
860 memset (&dinsn, 0, sizeof (dinsn));
861 dinsn.number = btrace_insn_number (&it);
862 dinsn.addr = insn->pc;
31fd9caa 863
da8c46d2 864 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 865 dinsn.is_speculative = 1;
da8c46d2 866
046bebe1 867 disasm.pretty_print_insn (&dinsn, flags);
31fd9caa 868 }
afedecd3
MM
869 }
870}
871
f6ac5f3d 872/* The insn_history method of target record-btrace. */
afedecd3 873
f6ac5f3d
PA
874void
875record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
afedecd3
MM
876{
877 struct btrace_thread_info *btinfo;
23a7fe75
MM
878 struct btrace_insn_history *history;
879 struct btrace_insn_iterator begin, end;
afedecd3 880 struct ui_out *uiout;
23a7fe75 881 unsigned int context, covered;
afedecd3
MM
882
883 uiout = current_uiout;
2e783024 884 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 885 context = abs (size);
afedecd3
MM
886 if (context == 0)
887 error (_("Bad record instruction-history-size."));
888
23a7fe75
MM
889 btinfo = require_btrace ();
890 history = btinfo->insn_history;
891 if (history == NULL)
afedecd3 892 {
07bbe694 893 struct btrace_insn_iterator *replay;
afedecd3 894
9a24775b 895 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 896
07bbe694
MM
897 /* If we're replaying, we start at the replay position. Otherwise, we
898 start at the tail of the trace. */
899 replay = btinfo->replay;
900 if (replay != NULL)
901 begin = *replay;
902 else
903 btrace_insn_end (&begin, btinfo);
904
905 /* We start from here and expand in the requested direction. Then we
906 expand in the other direction, as well, to fill up any remaining
907 context. */
908 end = begin;
909 if (size < 0)
910 {
911 /* We want the current position covered, as well. */
912 covered = btrace_insn_next (&end, 1);
913 covered += btrace_insn_prev (&begin, context - covered);
914 covered += btrace_insn_next (&end, context - covered);
915 }
916 else
917 {
918 covered = btrace_insn_next (&end, context);
919 covered += btrace_insn_prev (&begin, context - covered);
920 }
afedecd3
MM
921 }
922 else
923 {
23a7fe75
MM
924 begin = history->begin;
925 end = history->end;
afedecd3 926
9a24775b 927 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 928 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 929
23a7fe75
MM
930 if (size < 0)
931 {
932 end = begin;
933 covered = btrace_insn_prev (&begin, context);
934 }
935 else
936 {
937 begin = end;
938 covered = btrace_insn_next (&end, context);
939 }
afedecd3
MM
940 }
941
23a7fe75 942 if (covered > 0)
31fd9caa 943 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
944 else
945 {
946 if (size < 0)
6cb06a8c 947 gdb_printf (_("At the start of the branch trace record.\n"));
23a7fe75 948 else
6cb06a8c 949 gdb_printf (_("At the end of the branch trace record.\n"));
23a7fe75 950 }
afedecd3 951
23a7fe75 952 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
953}
954
f6ac5f3d 955/* The insn_history_range method of target record-btrace. */
afedecd3 956
f6ac5f3d
PA
957void
958record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
959 gdb_disassembly_flags flags)
afedecd3
MM
960{
961 struct btrace_thread_info *btinfo;
23a7fe75 962 struct btrace_insn_iterator begin, end;
afedecd3 963 struct ui_out *uiout;
23a7fe75
MM
964 unsigned int low, high;
965 int found;
afedecd3
MM
966
967 uiout = current_uiout;
2e783024 968 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
969 low = from;
970 high = to;
afedecd3 971
9a24775b 972 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
973
974 /* Check for wrap-arounds. */
23a7fe75 975 if (low != from || high != to)
afedecd3
MM
976 error (_("Bad range."));
977
0688d04e 978 if (high < low)
afedecd3
MM
979 error (_("Bad range."));
980
23a7fe75 981 btinfo = require_btrace ();
afedecd3 982
23a7fe75
MM
983 found = btrace_find_insn_by_number (&begin, btinfo, low);
984 if (found == 0)
985 error (_("Range out of bounds."));
afedecd3 986
23a7fe75
MM
987 found = btrace_find_insn_by_number (&end, btinfo, high);
988 if (found == 0)
0688d04e
MM
989 {
990 /* Silently truncate the range. */
991 btrace_insn_end (&end, btinfo);
992 }
993 else
994 {
995 /* We want both begin and end to be inclusive. */
996 btrace_insn_next (&end, 1);
997 }
afedecd3 998
31fd9caa 999 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1000 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
1001}
1002
f6ac5f3d 1003/* The insn_history_from method of target record-btrace. */
afedecd3 1004
f6ac5f3d
PA
1005void
1006record_btrace_target::insn_history_from (ULONGEST from, int size,
1007 gdb_disassembly_flags flags)
afedecd3
MM
1008{
1009 ULONGEST begin, end, context;
1010
1011 context = abs (size);
0688d04e
MM
1012 if (context == 0)
1013 error (_("Bad record instruction-history-size."));
afedecd3
MM
1014
1015 if (size < 0)
1016 {
1017 end = from;
1018
1019 if (from < context)
1020 begin = 0;
1021 else
0688d04e 1022 begin = from - context + 1;
afedecd3
MM
1023 }
1024 else
1025 {
1026 begin = from;
0688d04e 1027 end = from + context - 1;
afedecd3
MM
1028
1029 /* Check for wrap-around. */
1030 if (end < begin)
1031 end = ULONGEST_MAX;
1032 }
1033
f6ac5f3d 1034 insn_history_range (begin, end, flags);
afedecd3
MM
1035}
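
/* Worked example for the arithmetic above (illustrative): FROM == 100 with
   SIZE == 10 requests the inclusive range [100; 109], while FROM == 100
   with SIZE == -10 requests [91; 100], i.e. the ten instructions up to and
   including instruction 100.  */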

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}
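
/* Example (illustrative): a function segment whose first instruction has
   number 10 (insn_offset == 10) and which contains five instructions is
   printed as "10,14"; both ends of the printed range are inclusive.  */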

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = sym->symtab ();

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
                       symtab_to_filename_for_display (sym->symtab ()),
                       file_name_style.style ());

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_signed ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_signed ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return sym->print_name ();
  else if (msym != NULL)
    return msym->print_name ();
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

          continue;
        }

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            uiout->text ("  ");
        }

      if (sym != NULL)
        uiout->field_string ("function", sym->print_name (),
                             function_name_style.style ());
      else if (msym != NULL)
        uiout->field_string ("function", msym->print_name (),
                             function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
        uiout->field_string ("function", "??",
                             function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          uiout->text (_("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          uiout->text (_("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      uiout->text ("\n");
    }
}

/* The call_history method of target record-btrace.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.btinfo = btinfo;
          begin.index = replay->call_index;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        gdb_printf (_("At the start of the branch trace record.\n"));
      else
        gdb_printf (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_range method of target record-btrace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
                                          record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The call_history_from method of target record-btrace.  */

void
record_btrace_target::call_history_from (ULONGEST from, int size,
                                         record_print_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  call_history_range (begin, end, flags);
}

/* The record_method method of target record-btrace.  */

enum record_method
record_btrace_target::record_method (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  thread_info *const tp = proc_target->find_thread (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The record_is_replaying method of target record-btrace.  */

bool
record_btrace_target::record_is_replaying (ptid_t ptid)
{
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if (btrace_is_replaying (tp))
      return true;

  return false;
}

/* The record_will_replay method of target record-btrace.  */

bool
record_btrace_target::record_will_replay (ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_is_replaying (ptid);
}

/* The xfer_partial method of target record-btrace.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
                                    const char *annex, gdb_byte *readbuf,
                                    const gdb_byte *writebuf, ULONGEST offset,
                                    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            const struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (this, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_section_flags (section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = std::min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
                                         offset, len, xfered_len);
}
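
/* Example (illustrative): while replaying with the default read-only
   setting, reading code, e.g. "x/4i $pc", succeeds because the text
   section is read-only, whereas a write such as "set var i = 0" is
   answered with TARGET_XFER_UNAVAILABLE until
   "set record btrace replay-memory-access read-write" is in effect.  */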

/* The insert_breakpoint method of target record-btrace.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The remove_breakpoint method of target record-btrace.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
                                         struct bp_target_info *bp_tgt,
                                         enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}

/* The fetch_registers method of target record-btrace.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  btrace_insn_iterator *replay = nullptr;

  /* Thread-db may ask for a thread's registers before GDB knows about the
     thread.  We forward the request to the target beneath in this
     case.  */
  thread_info *tp
    = current_inferior ()->process_target ()->find_thread (regcache->ptid ());
  if (tp != nullptr)
    replay = tp->btrace.replay;

  if (replay != nullptr && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}
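
/* Example (illustrative): after "record goto 100", "print $pc" yields the
   address of recorded instruction 100, supplied by the code above; other
   registers are not provided while replaying, and every request is
   forwarded to the target beneath when not replaying or when generating a
   core file.  */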
1580
f6ac5f3d 1581/* The store_registers method of target record-btrace. */
1f3ef581 1582
f6ac5f3d
PA
1583void
1584record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1585{
a52eab48 1586 if (!record_btrace_generating_corefile
222312d3 1587 && record_is_replaying (regcache->ptid ()))
4d10e986 1588 error (_("Cannot write registers while replaying."));
1f3ef581 1589
491144b5 1590 gdb_assert (may_write_registers);
1f3ef581 1591
b6a8c27b 1592 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1593}
1594
f6ac5f3d 1595/* The prepare_to_store method of target record-btrace. */
1f3ef581 1596
f6ac5f3d
PA
1597void
1598record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1599{
a52eab48 1600 if (!record_btrace_generating_corefile
222312d3 1601 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1602 return;
1603
b6a8c27b 1604 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1605}
1606
0b722aec
MM
1607/* The branch trace frame cache. */
1608
1609struct btrace_frame_cache
1610{
1611 /* The thread. */
1612 struct thread_info *tp;
1613
1614 /* The frame info. */
bd2b40ac 1615 frame_info *frame;
0b722aec
MM
1616
1617 /* The branch trace function segment. */
1618 const struct btrace_function *bfun;
1619};
1620
1621/* A struct btrace_frame_cache hash table indexed by NEXT. */
1622
1623static htab_t bfcache;
1624
1625/* hash_f for htab_create_alloc of bfcache. */
1626
1627static hashval_t
1628bfcache_hash (const void *arg)
1629{
19ba03f4
SM
1630 const struct btrace_frame_cache *cache
1631 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1632
1633 return htab_hash_pointer (cache->frame);
1634}
1635
1636/* eq_f for htab_create_alloc of bfcache. */
1637
1638static int
1639bfcache_eq (const void *arg1, const void *arg2)
1640{
19ba03f4
SM
1641 const struct btrace_frame_cache *cache1
1642 = (const struct btrace_frame_cache *) arg1;
1643 const struct btrace_frame_cache *cache2
1644 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1645
1646 return cache1->frame == cache2->frame;
1647}
1648
1649/* Create a new btrace frame cache. */
1650
1651static struct btrace_frame_cache *
bd2b40ac 1652bfcache_new (frame_info_ptr frame)
0b722aec
MM
1653{
1654 struct btrace_frame_cache *cache;
1655 void **slot;
1656
1657 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
bd2b40ac 1658 cache->frame = frame.get ();
0b722aec
MM
1659
1660 slot = htab_find_slot (bfcache, cache, INSERT);
1661 gdb_assert (*slot == NULL);
1662 *slot = cache;
1663
1664 return cache;
1665}
1666
1667/* Extract the branch trace function from a branch trace frame. */
1668
1669static const struct btrace_function *
bd2b40ac 1670btrace_get_frame_function (frame_info_ptr frame)
0b722aec
MM
1671{
1672 const struct btrace_frame_cache *cache;
0b722aec
MM
1673 struct btrace_frame_cache pattern;
1674 void **slot;
1675
bd2b40ac 1676 pattern.frame = frame.get ();
0b722aec
MM
1677
1678 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1679 if (slot == NULL)
1680 return NULL;
1681
19ba03f4 1682 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1683 return cache->bfun;
1684}
1685
cecac1ab
MM
1686/* Implement stop_reason method for record_btrace_frame_unwind. */
1687
1688static enum unwind_stop_reason
bd2b40ac 1689record_btrace_frame_unwind_stop_reason (frame_info_ptr this_frame,
cecac1ab
MM
1690 void **this_cache)
1691{
0b722aec
MM
1692 const struct btrace_frame_cache *cache;
1693 const struct btrace_function *bfun;
1694
19ba03f4 1695 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1696 bfun = cache->bfun;
1697 gdb_assert (bfun != NULL);
1698
42bfe59e 1699 if (bfun->up == 0)
0b722aec
MM
1700 return UNWIND_UNAVAILABLE;
1701
1702 return UNWIND_NO_REASON;
cecac1ab
MM
1703}
1704
1705/* Implement this_id method for record_btrace_frame_unwind. */
1706
1707static void
bd2b40ac 1708record_btrace_frame_this_id (frame_info_ptr this_frame, void **this_cache,
cecac1ab
MM
1709 struct frame_id *this_id)
1710{
0b722aec
MM
1711 const struct btrace_frame_cache *cache;
1712 const struct btrace_function *bfun;
4aeb0dfc 1713 struct btrace_call_iterator it;
0b722aec
MM
1714 CORE_ADDR code, special;
1715
19ba03f4 1716 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1717
1718 bfun = cache->bfun;
1719 gdb_assert (bfun != NULL);
1720
4aeb0dfc
TW
1721 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1722 bfun = btrace_call_get (&it);
0b722aec
MM
1723
1724 code = get_frame_func (this_frame);
1725 special = bfun->number;
1726
1727 *this_id = frame_id_build_unavailable_stack_special (code, special);
1728
1729 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1730 btrace_get_bfun_name (cache->bfun),
1731 core_addr_to_string_nz (this_id->code_addr),
1732 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1733}
1734
1735/* Implement prev_register method for record_btrace_frame_unwind. */
1736
1737static struct value *
bd2b40ac 1738record_btrace_frame_prev_register (frame_info_ptr this_frame,
cecac1ab
MM
1739 void **this_cache,
1740 int regnum)
1741{
0b722aec
MM
1742 const struct btrace_frame_cache *cache;
1743 const struct btrace_function *bfun, *caller;
42bfe59e 1744 struct btrace_call_iterator it;
0b722aec
MM
1745 struct gdbarch *gdbarch;
1746 CORE_ADDR pc;
1747 int pcreg;
1748
1749 gdbarch = get_frame_arch (this_frame);
1750 pcreg = gdbarch_pc_regnum (gdbarch);
1751 if (pcreg < 0 || regnum != pcreg)
1752 throw_error (NOT_AVAILABLE_ERROR,
1753 _("Registers are not available in btrace record history"));
1754
19ba03f4 1755 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1756 bfun = cache->bfun;
1757 gdb_assert (bfun != NULL);
1758
42bfe59e 1759 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1760 throw_error (NOT_AVAILABLE_ERROR,
1761 _("No caller in btrace record history"));
1762
42bfe59e
TW
1763 caller = btrace_call_get (&it);
1764
0b722aec 1765 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1766 pc = caller->insn.front ().pc;
0b722aec
MM
1767 else
1768 {
0860c437 1769 pc = caller->insn.back ().pc;
0b722aec
MM
1770 pc += gdb_insn_length (gdbarch, pc);
1771 }
1772
1773 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1774 btrace_get_bfun_name (bfun), bfun->level,
1775 core_addr_to_string_nz (pc));
1776
1777 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1778}
1779
1780/* Implement sniffer method for record_btrace_frame_unwind. */
1781
1782static int
1783record_btrace_frame_sniffer (const struct frame_unwind *self,
bd2b40ac 1784 frame_info_ptr this_frame,
cecac1ab
MM
1785 void **this_cache)
1786{
0b722aec
MM
1787 const struct btrace_function *bfun;
1788 struct btrace_frame_cache *cache;
cecac1ab 1789 struct thread_info *tp;
bd2b40ac 1790 frame_info_ptr next;
cecac1ab
MM
1791
1792 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1793 tp = inferior_thread ();
cecac1ab 1794
0b722aec
MM
1795 bfun = NULL;
1796 next = get_next_frame (this_frame);
1797 if (next == NULL)
1798 {
1799 const struct btrace_insn_iterator *replay;
1800
1801 replay = tp->btrace.replay;
1802 if (replay != NULL)
08c3f6d2 1803 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1804 }
1805 else
1806 {
1807 const struct btrace_function *callee;
42bfe59e 1808 struct btrace_call_iterator it;
0b722aec
MM
1809
1810 callee = btrace_get_frame_function (next);
42bfe59e
TW
1811 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1812 return 0;
1813
1814 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1815 return 0;
1816
1817 bfun = btrace_call_get (&it);
0b722aec
MM
1818 }
1819
1820 if (bfun == NULL)
1821 return 0;
1822
1823 DEBUG ("[frame] sniffed frame for %s on level %d",
1824 btrace_get_bfun_name (bfun), bfun->level);
1825
1826 /* This is our frame. Initialize the frame cache. */
1827 cache = bfcache_new (this_frame);
1828 cache->tp = tp;
1829 cache->bfun = bfun;
1830
1831 *this_cache = cache;
1832 return 1;
1833}
1834
1835/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1836
1837static int
1838record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
bd2b40ac 1839 frame_info_ptr this_frame,
0b722aec
MM
1840 void **this_cache)
1841{
1842 const struct btrace_function *bfun, *callee;
1843 struct btrace_frame_cache *cache;
42bfe59e 1844 struct btrace_call_iterator it;
bd2b40ac 1845 frame_info_ptr next;
42bfe59e 1846 struct thread_info *tinfo;
0b722aec
MM
1847
1848 next = get_next_frame (this_frame);
1849 if (next == NULL)
1850 return 0;
1851
1852 callee = btrace_get_frame_function (next);
1853 if (callee == NULL)
1854 return 0;
1855
1856 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1857 return 0;
1858
00431a78 1859 tinfo = inferior_thread ();
42bfe59e 1860 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1861 return 0;
1862
42bfe59e
TW
1863 bfun = btrace_call_get (&it);
1864
0b722aec
MM
1865 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1866 btrace_get_bfun_name (bfun), bfun->level);
1867
1868 /* This is our frame. Initialize the frame cache. */
1869 cache = bfcache_new (this_frame);
42bfe59e 1870 cache->tp = tinfo;
0b722aec
MM
1871 cache->bfun = bfun;
1872
1873 *this_cache = cache;
1874 return 1;
1875}
1876
1877static void
bd2b40ac 1878record_btrace_frame_dealloc_cache (frame_info *self, void *this_cache)
0b722aec
MM
1879{
1880 struct btrace_frame_cache *cache;
1881 void **slot;
1882
19ba03f4 1883 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1884
1885 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1886 gdb_assert (slot != NULL);
1887
1888 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1889}
1890
 1891/* btrace recording does not store previous memory contents, nor the stack
30baf67b 1892 frames' contents.  Any unwinding would return erroneous results as the stack
cecac1ab
MM
 1893 contents no longer match the changed PC value restored from history.
 1894 Therefore this unwinder reports any possibly unwound registers as
 1895 <unavailable>. */
1896
0b722aec 1897const struct frame_unwind record_btrace_frame_unwind =
cecac1ab 1898{
a154d838 1899 "record-btrace",
cecac1ab
MM
1900 NORMAL_FRAME,
1901 record_btrace_frame_unwind_stop_reason,
1902 record_btrace_frame_this_id,
1903 record_btrace_frame_prev_register,
1904 NULL,
0b722aec
MM
1905 record_btrace_frame_sniffer,
1906 record_btrace_frame_dealloc_cache
1907};
1908
1909const struct frame_unwind record_btrace_tailcall_frame_unwind =
1910{
a154d838 1911 "record-btrace tailcall",
0b722aec
MM
1912 TAILCALL_FRAME,
1913 record_btrace_frame_unwind_stop_reason,
1914 record_btrace_frame_this_id,
1915 record_btrace_frame_prev_register,
1916 NULL,
1917 record_btrace_tailcall_frame_sniffer,
1918 record_btrace_frame_dealloc_cache
cecac1ab 1919};
b2f4cfde 1920
f6ac5f3d 1921/* Implement the get_unwinder method. */
ac01945b 1922
f6ac5f3d
PA
1923const struct frame_unwind *
1924record_btrace_target::get_unwinder ()
ac01945b
TT
1925{
1926 return &record_btrace_frame_unwind;
1927}
1928
f6ac5f3d 1929/* Implement the get_tailcall_unwinder method. */
ac01945b 1930
f6ac5f3d
PA
1931const struct frame_unwind *
1932record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1933{
1934 return &record_btrace_tailcall_frame_unwind;
1935}
1936
987e68b1
MM
1937/* Return a human-readable string for FLAG. */
1938
1939static const char *
04902b09 1940btrace_thread_flag_to_str (btrace_thread_flags flag)
987e68b1
MM
1941{
1942 switch (flag)
1943 {
1944 case BTHR_STEP:
1945 return "step";
1946
1947 case BTHR_RSTEP:
1948 return "reverse-step";
1949
1950 case BTHR_CONT:
1951 return "cont";
1952
1953 case BTHR_RCONT:
1954 return "reverse-cont";
1955
1956 case BTHR_STOP:
1957 return "stop";
1958 }
1959
1960 return "<invalid>";
1961}
1962
52834460
MM
1963/* Indicate that TP should be resumed according to FLAG. */
1964
1965static void
1966record_btrace_resume_thread (struct thread_info *tp,
1967 enum btrace_thread_flag flag)
1968{
1969 struct btrace_thread_info *btinfo;
1970
43792cf0 1971 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
e53c95d4 1972 tp->ptid.to_string ().c_str (), flag,
a068643d 1973 btrace_thread_flag_to_str (flag));
52834460
MM
1974
1975 btinfo = &tp->btrace;
1976
52834460 1977 /* Fetch the latest branch trace. */
4a4495d6 1978 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1979
0ca912df
MM
1980 /* A resume request overwrites a preceding resume or stop request. */
1981 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1982 btinfo->flags |= flag;
1983}
1984
ec71cc2f
MM
1985/* Get the current frame for TP. */
1986
79b8d3b0
TT
1987static struct frame_id
1988get_thread_current_frame_id (struct thread_info *tp)
ec71cc2f 1989{
00431a78
PA
1990 /* Set current thread, which is implicitly used by
1991 get_current_frame. */
1992 scoped_restore_current_thread restore_thread;
1993
1994 switch_to_thread (tp);
ec71cc2f 1995
5b6d1e4f
PA
1996 process_stratum_target *proc_target = tp->inf->process_target ();
1997
ec71cc2f
MM
1998 /* Clear the executing flag to allow changes to the current frame.
1999 We are not actually running, yet. We just started a reverse execution
2000 command or a record goto command.
2001 For the latter, EXECUTING is false and this has no effect.
f6ac5f3d 2002 For the former, EXECUTING is true and we're in wait, about to
ec71cc2f 2003 move the thread. Since we need to recompute the stack, we temporarily
85102364 2004 set EXECUTING to false. */
391c90ee 2005 bool executing = tp->executing ();
5b6d1e4f 2006 set_executing (proc_target, inferior_ptid, false);
391c90ee 2007 SCOPE_EXIT
ec71cc2f 2008 {
5b6d1e4f 2009 set_executing (proc_target, inferior_ptid, executing);
391c90ee
AB
2010 };
2011 return get_frame_id (get_current_frame ());
ec71cc2f
MM
2012}
2013
52834460
MM
2014/* Start replaying a thread. */
2015
2016static struct btrace_insn_iterator *
2017record_btrace_start_replaying (struct thread_info *tp)
2018{
52834460
MM
2019 struct btrace_insn_iterator *replay;
2020 struct btrace_thread_info *btinfo;
52834460
MM
2021
2022 btinfo = &tp->btrace;
2023 replay = NULL;
2024
2025 /* We can't start replaying without trace. */
b54b03bd 2026 if (btinfo->functions.empty ())
49a73ab9 2027 error (_("No trace."));
52834460 2028
52834460
MM
 2029  /* GDB stores the current frame_id when stepping in order to detect steps
2030 into subroutines.
2031 Since frames are computed differently when we're replaying, we need to
2032 recompute those stored frames and fix them up so we can still detect
2033 subroutines after we started replaying. */
a70b8144 2034 try
52834460 2035 {
52834460
MM
2036 struct frame_id frame_id;
2037 int upd_step_frame_id, upd_step_stack_frame_id;
2038
2039 /* The current frame without replaying - computed via normal unwind. */
79b8d3b0 2040 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2041
2042 /* Check if we need to update any stepping-related frame id's. */
a0cbd650
TT
2043 upd_step_frame_id = (frame_id == tp->control.step_frame_id);
2044 upd_step_stack_frame_id = (frame_id == tp->control.step_stack_frame_id);
52834460
MM
2045
2046 /* We start replaying at the end of the branch trace. This corresponds
2047 to the current instruction. */
8d749320 2048 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2049 btrace_insn_end (replay, btinfo);
2050
31fd9caa
MM
2051 /* Skip gaps at the end of the trace. */
2052 while (btrace_insn_get (replay) == NULL)
2053 {
2054 unsigned int steps;
2055
2056 steps = btrace_insn_prev (replay, 1);
2057 if (steps == 0)
2058 error (_("No trace."));
2059 }
2060
52834460
MM
2061 /* We're not replaying, yet. */
2062 gdb_assert (btinfo->replay == NULL);
2063 btinfo->replay = replay;
2064
2065 /* Make sure we're not using any stale registers. */
00431a78 2066 registers_changed_thread (tp);
52834460
MM
2067
2068 /* The current frame with replaying - computed via btrace unwind. */
79b8d3b0 2069 frame_id = get_thread_current_frame_id (tp);
52834460
MM
2070
2071 /* Replace stepping related frames where necessary. */
2072 if (upd_step_frame_id)
2073 tp->control.step_frame_id = frame_id;
2074 if (upd_step_stack_frame_id)
2075 tp->control.step_stack_frame_id = frame_id;
2076 }
230d2906 2077 catch (const gdb_exception &except)
52834460
MM
2078 {
2079 xfree (btinfo->replay);
2080 btinfo->replay = NULL;
2081
00431a78 2082 registers_changed_thread (tp);
52834460 2083
eedc3f4f 2084 throw;
52834460
MM
2085 }
2086
2087 return replay;
2088}
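/* Editor's note (illustrative, not part of the original source): a concrete
   case of the frame-id fixup above is a "reverse-step" issued from a live
   stop.  tp->control.step_frame_id was computed by the normal unwinder
   before replay started; once the replay iterator is installed and the
   registers are flushed, the same frame is re-identified by the btrace
   unwinder, so the stored id is replaced to keep step/next frame-change
   detection working while replaying.  */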
2089
2090/* Stop replaying a thread. */
2091
2092static void
2093record_btrace_stop_replaying (struct thread_info *tp)
2094{
2095 struct btrace_thread_info *btinfo;
2096
2097 btinfo = &tp->btrace;
2098
2099 xfree (btinfo->replay);
2100 btinfo->replay = NULL;
2101
2102 /* Make sure we're not leaving any stale registers. */
00431a78 2103 registers_changed_thread (tp);
52834460
MM
2104}
2105
e3cfc1c7
MM
2106/* Stop replaying TP if it is at the end of its execution history. */
2107
2108static void
2109record_btrace_stop_replaying_at_end (struct thread_info *tp)
2110{
2111 struct btrace_insn_iterator *replay, end;
2112 struct btrace_thread_info *btinfo;
2113
2114 btinfo = &tp->btrace;
2115 replay = btinfo->replay;
2116
2117 if (replay == NULL)
2118 return;
2119
2120 btrace_insn_end (&end, btinfo);
2121
2122 if (btrace_insn_cmp (replay, &end) == 0)
2123 record_btrace_stop_replaying (tp);
2124}
2125
f6ac5f3d 2126/* The resume method of target record-btrace. */
b2f4cfde 2127
f6ac5f3d
PA
2128void
2129record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2130{
d2939ba2 2131 enum btrace_thread_flag flag, cflag;
52834460 2132
e53c95d4 2133 DEBUG ("resume %s: %s%s", ptid.to_string ().c_str (),
f6ac5f3d 2134 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2135 step ? "step" : "cont");
52834460 2136
0ca912df
MM
2137 /* Store the execution direction of the last resume.
2138
f6ac5f3d 2139 If there is more than one resume call, we have to rely on infrun
0ca912df 2140 to not change the execution direction in-between. */
f6ac5f3d 2141 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2142
0ca912df 2143 /* As long as we're not replaying, just forward the request.
52834460 2144
0ca912df
MM
2145 For non-stop targets this means that no thread is replaying. In order to
2146 make progress, we may need to explicitly move replaying threads to the end
2147 of their execution history. */
f6ac5f3d
PA
2148 if ((::execution_direction != EXEC_REVERSE)
2149 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2150 {
b6a8c27b 2151 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2152 return;
b2f4cfde
MM
2153 }
2154
52834460 2155 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2156 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2157 {
2158 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2159 cflag = BTHR_RCONT;
2160 }
52834460 2161 else
d2939ba2
MM
2162 {
2163 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2164 cflag = BTHR_CONT;
2165 }
52834460 2166
52834460 2167 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2168 record_btrace_wait below.
2169
2170 For all-stop targets, we only step INFERIOR_PTID and continue others. */
5b6d1e4f
PA
2171
2172 process_stratum_target *proc_target = current_inferior ()->process_target ();
2173
d2939ba2
MM
2174 if (!target_is_non_stop_p ())
2175 {
26a57c92 2176 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2177
5b6d1e4f 2178 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2179 {
2180 if (tp->ptid.matches (inferior_ptid))
2181 record_btrace_resume_thread (tp, flag);
2182 else
2183 record_btrace_resume_thread (tp, cflag);
2184 }
d2939ba2
MM
2185 }
2186 else
2187 {
5b6d1e4f 2188 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331 2189 record_btrace_resume_thread (tp, flag);
d2939ba2 2190 }
70ad5bff
MM
2191
2192 /* Async support. */
2193 if (target_can_async_p ())
2194 {
4a570176 2195 target_async (true);
70ad5bff
MM
2196 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2197 }
52834460
MM
2198}
2199
987e68b1
MM
2200/* Cancel resuming TP. */
2201
2202static void
2203record_btrace_cancel_resume (struct thread_info *tp)
2204{
04902b09 2205 btrace_thread_flags flags;
987e68b1
MM
2206
2207 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2208 if (flags == 0)
2209 return;
2210
43792cf0
PA
2211 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2212 print_thread_id (tp),
e53c95d4 2213 tp->ptid.to_string ().c_str (), flags.raw (),
987e68b1
MM
2214 btrace_thread_flag_to_str (flags));
2215
2216 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2217 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2218}
2219
2220/* Return a target_waitstatus indicating that we ran out of history. */
2221
2222static struct target_waitstatus
2223btrace_step_no_history (void)
2224{
2225 struct target_waitstatus status;
2226
183be222 2227 status.set_no_history ();
52834460
MM
2228
2229 return status;
2230}
2231
2232/* Return a target_waitstatus indicating that a step finished. */
2233
2234static struct target_waitstatus
2235btrace_step_stopped (void)
2236{
2237 struct target_waitstatus status;
2238
183be222 2239 status.set_stopped (GDB_SIGNAL_TRAP);
52834460
MM
2240
2241 return status;
2242}
2243
6e4879f0
MM
2244/* Return a target_waitstatus indicating that a thread was stopped as
2245 requested. */
2246
2247static struct target_waitstatus
2248btrace_step_stopped_on_request (void)
2249{
2250 struct target_waitstatus status;
2251
183be222 2252 status.set_stopped (GDB_SIGNAL_0);
6e4879f0
MM
2253
2254 return status;
2255}
2256
d825d248
MM
2257/* Return a target_waitstatus indicating a spurious stop. */
2258
2259static struct target_waitstatus
2260btrace_step_spurious (void)
2261{
2262 struct target_waitstatus status;
2263
183be222 2264 status.set_spurious ();
d825d248
MM
2265
2266 return status;
2267}
2268
e3cfc1c7
MM
2269/* Return a target_waitstatus indicating that the thread was not resumed. */
2270
2271static struct target_waitstatus
2272btrace_step_no_resumed (void)
2273{
2274 struct target_waitstatus status;
2275
183be222 2276 status.set_no_resumed ();
e3cfc1c7
MM
2277
2278 return status;
2279}
2280
2281/* Return a target_waitstatus indicating that we should wait again. */
2282
2283static struct target_waitstatus
2284btrace_step_again (void)
2285{
2286 struct target_waitstatus status;
2287
183be222 2288 status.set_ignore ();
e3cfc1c7
MM
2289
2290 return status;
2291}
2292
52834460
MM
2293/* Clear the record histories. */
2294
2295static void
2296record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2297{
2298 xfree (btinfo->insn_history);
2299 xfree (btinfo->call_history);
2300
2301 btinfo->insn_history = NULL;
2302 btinfo->call_history = NULL;
2303}
2304
3c615f99
MM
2305/* Check whether TP's current replay position is at a breakpoint. */
2306
2307static int
2308record_btrace_replay_at_breakpoint (struct thread_info *tp)
2309{
2310 struct btrace_insn_iterator *replay;
2311 struct btrace_thread_info *btinfo;
2312 const struct btrace_insn *insn;
3c615f99
MM
2313
2314 btinfo = &tp->btrace;
2315 replay = btinfo->replay;
2316
2317 if (replay == NULL)
2318 return 0;
2319
2320 insn = btrace_insn_get (replay);
2321 if (insn == NULL)
2322 return 0;
2323
00431a78 2324 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2325 &btinfo->stop_reason);
2326}
2327
d825d248 2328/* Step one instruction in forward direction. */
52834460
MM
2329
2330static struct target_waitstatus
d825d248 2331record_btrace_single_step_forward (struct thread_info *tp)
52834460 2332{
b61ce85c 2333 struct btrace_insn_iterator *replay, end, start;
52834460 2334 struct btrace_thread_info *btinfo;
52834460 2335
d825d248
MM
2336 btinfo = &tp->btrace;
2337 replay = btinfo->replay;
2338
2339 /* We're done if we're not replaying. */
2340 if (replay == NULL)
2341 return btrace_step_no_history ();
2342
011c71b6
MM
2343 /* Check if we're stepping a breakpoint. */
2344 if (record_btrace_replay_at_breakpoint (tp))
2345 return btrace_step_stopped ();
2346
b61ce85c
MM
2347 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2348 jump back to the instruction at which we started. */
2349 start = *replay;
d825d248
MM
2350 do
2351 {
2352 unsigned int steps;
2353
e3cfc1c7
MM
2354 /* We will bail out here if we continue stepping after reaching the end
2355 of the execution history. */
d825d248
MM
2356 steps = btrace_insn_next (replay, 1);
2357 if (steps == 0)
b61ce85c
MM
2358 {
2359 *replay = start;
2360 return btrace_step_no_history ();
2361 }
d825d248
MM
2362 }
2363 while (btrace_insn_get (replay) == NULL);
2364
2365 /* Determine the end of the instruction trace. */
2366 btrace_insn_end (&end, btinfo);
2367
e3cfc1c7
MM
2368 /* The execution trace contains (and ends with) the current instruction.
2369 This instruction has not been executed, yet, so the trace really ends
2370 one instruction earlier. */
d825d248 2371 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2372 return btrace_step_no_history ();
d825d248
MM
2373
2374 return btrace_step_spurious ();
2375}
2376
2377/* Step one instruction in backward direction. */
2378
2379static struct target_waitstatus
2380record_btrace_single_step_backward (struct thread_info *tp)
2381{
b61ce85c 2382 struct btrace_insn_iterator *replay, start;
d825d248 2383 struct btrace_thread_info *btinfo;
e59fa00f 2384
52834460
MM
2385 btinfo = &tp->btrace;
2386 replay = btinfo->replay;
2387
d825d248
MM
2388 /* Start replaying if we're not already doing so. */
2389 if (replay == NULL)
2390 replay = record_btrace_start_replaying (tp);
2391
2392 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2393 Skip gaps during replay. If we end up at a gap (at the beginning of
2394 the trace), jump back to the instruction at which we started. */
2395 start = *replay;
d825d248
MM
2396 do
2397 {
2398 unsigned int steps;
2399
2400 steps = btrace_insn_prev (replay, 1);
2401 if (steps == 0)
b61ce85c
MM
2402 {
2403 *replay = start;
2404 return btrace_step_no_history ();
2405 }
d825d248
MM
2406 }
2407 while (btrace_insn_get (replay) == NULL);
2408
011c71b6
MM
2409 /* Check if we're stepping a breakpoint.
2410
2411 For reverse-stepping, this check is after the step. There is logic in
2412 infrun.c that handles reverse-stepping separately. See, for example,
2413 proceed and adjust_pc_after_break.
2414
2415 This code assumes that for reverse-stepping, PC points to the last
2416 de-executed instruction, whereas for forward-stepping PC points to the
2417 next to-be-executed instruction. */
2418 if (record_btrace_replay_at_breakpoint (tp))
2419 return btrace_step_stopped ();
2420
d825d248
MM
2421 return btrace_step_spurious ();
2422}
2423
2424/* Step a single thread. */
2425
2426static struct target_waitstatus
2427record_btrace_step_thread (struct thread_info *tp)
2428{
2429 struct btrace_thread_info *btinfo;
2430 struct target_waitstatus status;
04902b09 2431 btrace_thread_flags flags;
d825d248
MM
2432
2433 btinfo = &tp->btrace;
2434
6e4879f0
MM
2435 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2436 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2437
43792cf0 2438 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
e53c95d4 2439 tp->ptid.to_string ().c_str (), flags.raw (),
987e68b1 2440 btrace_thread_flag_to_str (flags));
52834460 2441
6e4879f0
MM
2442 /* We can't step without an execution history. */
2443 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2444 return btrace_step_no_history ();
2445
52834460
MM
2446 switch (flags)
2447 {
2448 default:
f34652de 2449 internal_error (_("invalid stepping type."));
52834460 2450
6e4879f0
MM
2451 case BTHR_STOP:
2452 return btrace_step_stopped_on_request ();
2453
52834460 2454 case BTHR_STEP:
d825d248 2455 status = record_btrace_single_step_forward (tp);
183be222 2456 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2457 break;
52834460
MM
2458
2459 return btrace_step_stopped ();
2460
2461 case BTHR_RSTEP:
d825d248 2462 status = record_btrace_single_step_backward (tp);
183be222 2463 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2464 break;
52834460
MM
2465
2466 return btrace_step_stopped ();
2467
2468 case BTHR_CONT:
e3cfc1c7 2469 status = record_btrace_single_step_forward (tp);
183be222 2470 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2471 break;
52834460 2472
e3cfc1c7
MM
2473 btinfo->flags |= flags;
2474 return btrace_step_again ();
52834460
MM
2475
2476 case BTHR_RCONT:
e3cfc1c7 2477 status = record_btrace_single_step_backward (tp);
183be222 2478 if (status.kind () != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2479 break;
52834460 2480
e3cfc1c7
MM
2481 btinfo->flags |= flags;
2482 return btrace_step_again ();
2483 }
d825d248 2484
f6ac5f3d 2485 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7 2486 method will stop the thread for whom the event is reported. */
183be222 2487 if (status.kind () == TARGET_WAITKIND_NO_HISTORY)
e3cfc1c7 2488 btinfo->flags |= flags;
52834460 2489
e3cfc1c7 2490 return status;
b2f4cfde
MM
2491}
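/* Editor's note (illustrative, not part of the original source): a replayed
   "continue" is thus implemented as repeated single steps.  Every
   TARGET_WAITKIND_SPURIOUS result re-arms BTHR_CONT/BTHR_RCONT and returns
   TARGET_WAITKIND_IGNORE, so the wait loop below calls back into this
   function until a breakpoint stop or the end of the execution history is
   reported.  */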
2492
a6b5be76
MM
2493/* Announce further events if necessary. */
2494
2495static void
53127008
SM
2496record_btrace_maybe_mark_async_event
2497 (const std::vector<thread_info *> &moving,
2498 const std::vector<thread_info *> &no_history)
a6b5be76 2499{
53127008
SM
2500 bool more_moving = !moving.empty ();
 2501  bool more_no_history = !no_history.empty ();
a6b5be76
MM
2502
2503 if (!more_moving && !more_no_history)
2504 return;
2505
2506 if (more_moving)
2507 DEBUG ("movers pending");
2508
2509 if (more_no_history)
2510 DEBUG ("no-history pending");
2511
2512 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2513}
2514
f6ac5f3d 2515/* The wait method of target record-btrace. */
b2f4cfde 2516
f6ac5f3d
PA
2517ptid_t
2518record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
b60cea74 2519 target_wait_flags options)
b2f4cfde 2520{
53127008
SM
2521 std::vector<thread_info *> moving;
2522 std::vector<thread_info *> no_history;
52834460 2523
85d3ad8e
SM
 2524  /* Clear this; if needed, we'll re-mark it below.  */
2525 clear_async_event_handler (record_btrace_async_inferior_event_handler);
2526
e53c95d4 2527 DEBUG ("wait %s (0x%x)", ptid.to_string ().c_str (),
b60cea74 2528 (unsigned) options);
52834460 2529
b2f4cfde 2530 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2531 if ((::execution_direction != EXEC_REVERSE)
2532 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2533 {
b6a8c27b 2534 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2535 }
2536
e3cfc1c7 2537 /* Keep a work list of moving threads. */
5b6d1e4f
PA
2538 process_stratum_target *proc_target = current_inferior ()->process_target ();
2539 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2540 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2541 moving.push_back (tp);
e3cfc1c7 2542
53127008 2543 if (moving.empty ())
52834460 2544 {
e3cfc1c7 2545 *status = btrace_step_no_resumed ();
52834460 2546
e53c95d4 2547 DEBUG ("wait ended by %s: %s", null_ptid.to_string ().c_str (),
7dca2ea7 2548 status->to_string ().c_str ());
e3cfc1c7 2549
e3cfc1c7 2550 return null_ptid;
52834460
MM
2551 }
2552
e3cfc1c7
MM
2553 /* Step moving threads one by one, one step each, until either one thread
2554 reports an event or we run out of threads to step.
2555
2556 When stepping more than one thread, chances are that some threads reach
2557 the end of their execution history earlier than others. If we reported
2558 this immediately, all-stop on top of non-stop would stop all threads and
2559 resume the same threads next time. And we would report the same thread
2560 having reached the end of its execution history again.
2561
2562 In the worst case, this would starve the other threads. But even if other
2563 threads would be allowed to make progress, this would result in far too
2564 many intermediate stops.
2565
2566 We therefore delay the reporting of "no execution history" until we have
2567 nothing else to report. By this time, all threads should have moved to
2568 either the beginning or the end of their execution history. There will
2569 be a single user-visible stop. */
53127008
SM
2570 struct thread_info *eventing = NULL;
2571 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2572 {
53127008 2573 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2574 {
53127008
SM
2575 thread_info *tp = moving[ix];
2576
e3cfc1c7
MM
2577 *status = record_btrace_step_thread (tp);
2578
183be222 2579 switch (status->kind ())
e3cfc1c7
MM
2580 {
2581 case TARGET_WAITKIND_IGNORE:
2582 ix++;
2583 break;
2584
2585 case TARGET_WAITKIND_NO_HISTORY:
53127008 2586 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2587 break;
2588
2589 default:
53127008 2590 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2591 break;
2592 }
2593 }
2594 }
2595
2596 if (eventing == NULL)
2597 {
2598 /* We started with at least one moving thread. This thread must have
2599 either stopped or reached the end of its execution history.
2600
2601 In the former case, EVENTING must not be NULL.
2602 In the latter case, NO_HISTORY must not be empty. */
53127008 2603 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2604
2605 /* We kept threads moving at the end of their execution history. Stop
2606 EVENTING now that we are going to report its stop. */
53127008 2607 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2608 eventing->btrace.flags &= ~BTHR_MOVE;
2609
2610 *status = btrace_step_no_history ();
2611 }
2612
2613 gdb_assert (eventing != NULL);
2614
2615 /* We kept threads replaying at the end of their execution history. Stop
2616 replaying EVENTING now that we are going to report its stop. */
2617 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2618
2619 /* Stop all other threads. */
5953356c 2620 if (!target_is_non_stop_p ())
53127008 2621 {
d89edf9b 2622 for (thread_info *tp : current_inferior ()->non_exited_threads ())
53127008
SM
2623 record_btrace_cancel_resume (tp);
2624 }
52834460 2625
a6b5be76
MM
2626 /* In async mode, we need to announce further events. */
2627 if (target_is_async_p ())
2628 record_btrace_maybe_mark_async_event (moving, no_history);
2629
52834460 2630 /* Start record histories anew from the current position. */
e3cfc1c7 2631 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2632
2633 /* We moved the replay position but did not update registers. */
00431a78 2634 registers_changed_thread (eventing);
e3cfc1c7 2635
43792cf0
PA
2636 DEBUG ("wait ended by thread %s (%s): %s",
2637 print_thread_id (eventing),
e53c95d4 2638 eventing->ptid.to_string ().c_str (),
7dca2ea7 2639 status->to_string ().c_str ());
52834460 2640
e3cfc1c7 2641 return eventing->ptid;
52834460
MM
2642}
2643
f6ac5f3d 2644/* The stop method of target record-btrace. */
6e4879f0 2645
f6ac5f3d
PA
2646void
2647record_btrace_target::stop (ptid_t ptid)
6e4879f0 2648{
e53c95d4 2649 DEBUG ("stop %s", ptid.to_string ().c_str ());
6e4879f0
MM
2650
2651 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2652 if ((::execution_direction != EXEC_REVERSE)
2653 && !record_is_replaying (minus_one_ptid))
6e4879f0 2654 {
b6a8c27b 2655 this->beneath ()->stop (ptid);
6e4879f0
MM
2656 }
2657 else
2658 {
5b6d1e4f
PA
2659 process_stratum_target *proc_target
2660 = current_inferior ()->process_target ();
2661
2662 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2663 {
2664 tp->btrace.flags &= ~BTHR_MOVE;
2665 tp->btrace.flags |= BTHR_STOP;
2666 }
6e4879f0
MM
2667 }
2668 }
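/* Editor's note (illustrative, not part of the original source): while
   replaying, an interrupt request thus only sets BTHR_STOP.  The wait
   method picks the thread up via record_btrace_step_thread, which reports
   it stopped with GDB_SIGNAL_0 (btrace_step_stopped_on_request).  */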
2669
f6ac5f3d 2670/* The can_execute_reverse method of target record-btrace. */
52834460 2671
57810aa7 2672bool
f6ac5f3d 2673record_btrace_target::can_execute_reverse ()
52834460 2674{
57810aa7 2675 return true;
52834460
MM
2676}
2677
f6ac5f3d 2678/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2679
57810aa7 2680bool
f6ac5f3d 2681record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2682{
f6ac5f3d 2683 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2684 {
2685 struct thread_info *tp = inferior_thread ();
2686
2687 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2688 }
2689
b6a8c27b 2690 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2691}
2692
f6ac5f3d 2693/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2694 record-btrace. */
2695
57810aa7 2696bool
f6ac5f3d 2697record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2698{
f6ac5f3d 2699 if (record_is_replaying (minus_one_ptid))
57810aa7 2700 return true;
9e8915c6 2701
b6a8c27b 2702 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2703}
2704
f6ac5f3d 2705/* The stopped_by_hw_breakpoint method of target record-btrace. */
9e8915c6 2706
57810aa7 2707bool
f6ac5f3d 2708record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2709{
f6ac5f3d 2710 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2711 {
2712 struct thread_info *tp = inferior_thread ();
2713
2714 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2715 }
2716
b6a8c27b 2717 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2718}
2719
f6ac5f3d 2720/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2721 record-btrace. */
2722
57810aa7 2723bool
f6ac5f3d 2724record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2725{
f6ac5f3d 2726 if (record_is_replaying (minus_one_ptid))
57810aa7 2727 return true;
52834460 2728
b6a8c27b 2729 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2730}
2731
f6ac5f3d 2732/* The update_thread_list method of target record-btrace. */
e2887aa3 2733
f6ac5f3d
PA
2734void
2735record_btrace_target::update_thread_list ()
e2887aa3 2736{
e8032dde 2737 /* We don't add or remove threads during replay. */
f6ac5f3d 2738 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2739 return;
2740
2741 /* Forward the request. */
b6a8c27b 2742 this->beneath ()->update_thread_list ();
e2887aa3
MM
2743}
2744
f6ac5f3d 2745/* The thread_alive method of target record-btrace. */
e2887aa3 2746
57810aa7 2747bool
f6ac5f3d 2748record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2749{
2750 /* We don't add or remove threads during replay. */
f6ac5f3d 2751 if (record_is_replaying (minus_one_ptid))
00431a78 2752 return true;
e2887aa3
MM
2753
2754 /* Forward the request. */
b6a8c27b 2755 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2756}
2757
066ce621
MM
2758/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2759 is stopped. */
2760
2761static void
2762record_btrace_set_replay (struct thread_info *tp,
2763 const struct btrace_insn_iterator *it)
2764{
2765 struct btrace_thread_info *btinfo;
2766
2767 btinfo = &tp->btrace;
2768
a0f1b963 2769 if (it == NULL)
52834460 2770 record_btrace_stop_replaying (tp);
066ce621
MM
2771 else
2772 {
2773 if (btinfo->replay == NULL)
52834460 2774 record_btrace_start_replaying (tp);
066ce621
MM
2775 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2776 return;
2777
2778 *btinfo->replay = *it;
00431a78 2779 registers_changed_thread (tp);
066ce621
MM
2780 }
2781
52834460
MM
2782 /* Start anew from the new replay position. */
2783 record_btrace_clear_histories (btinfo);
485668e5 2784
1edb66d8 2785 inferior_thread ()->set_stop_pc (regcache_read_pc (get_current_regcache ()));
485668e5 2786 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2787}
2788
f6ac5f3d 2789/* The goto_record_begin method of target record-btrace. */
066ce621 2790
f6ac5f3d
PA
2791void
2792record_btrace_target::goto_record_begin ()
066ce621
MM
2793{
2794 struct thread_info *tp;
2795 struct btrace_insn_iterator begin;
2796
2797 tp = require_btrace_thread ();
2798
2799 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2800
2801 /* Skip gaps at the beginning of the trace. */
2802 while (btrace_insn_get (&begin) == NULL)
2803 {
2804 unsigned int steps;
2805
2806 steps = btrace_insn_next (&begin, 1);
2807 if (steps == 0)
2808 error (_("No trace."));
2809 }
2810
066ce621 2811 record_btrace_set_replay (tp, &begin);
066ce621
MM
2812}
2813
f6ac5f3d 2814/* The goto_record_end method of target record-btrace. */
066ce621 2815
f6ac5f3d
PA
2816void
2817record_btrace_target::goto_record_end ()
066ce621
MM
2818{
2819 struct thread_info *tp;
2820
2821 tp = require_btrace_thread ();
2822
2823 record_btrace_set_replay (tp, NULL);
066ce621
MM
2824}
2825
f6ac5f3d 2826/* The goto_record method of target record-btrace. */
066ce621 2827
f6ac5f3d
PA
2828void
2829record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2830{
2831 struct thread_info *tp;
2832 struct btrace_insn_iterator it;
2833 unsigned int number;
2834 int found;
2835
2836 number = insn;
2837
2838 /* Check for wrap-arounds. */
2839 if (number != insn)
2840 error (_("Instruction number out of range."));
2841
2842 tp = require_btrace_thread ();
2843
2844 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2845
2846 /* Check if the instruction could not be found or is a gap. */
2847 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2848 error (_("No such instruction."));
2849
2850 record_btrace_set_replay (tp, &it);
066ce621
MM
2851}
2852
f6ac5f3d 2853/* The record_stop_replaying method of target record-btrace. */
797094dd 2854
f6ac5f3d
PA
2855void
2856record_btrace_target::record_stop_replaying ()
797094dd 2857{
d89edf9b 2858 for (thread_info *tp : current_inferior ()->non_exited_threads ())
797094dd
MM
2859 record_btrace_stop_replaying (tp);
2860}
2861
f6ac5f3d 2862/* The execution_direction target method. */
70ad5bff 2863
f6ac5f3d
PA
2864enum exec_direction_kind
2865record_btrace_target::execution_direction ()
70ad5bff
MM
2866{
2867 return record_btrace_resume_exec_dir;
2868}
2869
f6ac5f3d 2870/* The prepare_to_generate_core target method. */
aef92902 2871
f6ac5f3d
PA
2872void
2873record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2874{
2875 record_btrace_generating_corefile = 1;
2876}
2877
f6ac5f3d 2878/* The done_generating_core target method. */
aef92902 2879
f6ac5f3d
PA
2880void
2881record_btrace_target::done_generating_core ()
aef92902
MM
2882{
2883 record_btrace_generating_corefile = 0;
2884}
2885
f4abbc16
MM
2886/* Start recording in BTS format. */
2887
2888static void
cdb34d4a 2889cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2890{
f4abbc16
MM
2891 if (args != NULL && *args != 0)
2892 error (_("Invalid argument."));
2893
2894 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2895
a70b8144 2896 try
492d29ea 2897 {
95a6b0a1 2898 execute_command ("target record-btrace", from_tty);
492d29ea 2899 }
230d2906 2900 catch (const gdb_exception &exception)
f4abbc16
MM
2901 {
2902 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2903 throw;
f4abbc16
MM
2904 }
2905}
2906
bc504a31 2907/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2908
2909static void
cdb34d4a 2910cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2911{
2912 if (args != NULL && *args != 0)
2913 error (_("Invalid argument."));
2914
b20a6524 2915 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2916
a70b8144 2917 try
492d29ea 2918 {
95a6b0a1 2919 execute_command ("target record-btrace", from_tty);
492d29ea 2920 }
230d2906 2921 catch (const gdb_exception &exception)
492d29ea
PA
2922 {
2923 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2924 throw;
492d29ea 2925 }
afedecd3
MM
2926}
2927
b20a6524
MM
 2928/* The "record btrace" command.  Tries "target record-btrace" in pt format,
     falling back to bts. */
2929
2930static void
981a3fb3 2931cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2932{
2933 if (args != NULL && *args != 0)
2934 error (_("Invalid argument."));
2935
2936 record_btrace_conf.format = BTRACE_FORMAT_PT;
2937
a70b8144 2938 try
b20a6524 2939 {
95a6b0a1 2940 execute_command ("target record-btrace", from_tty);
b20a6524 2941 }
b1ffd112 2942 catch (const gdb_exception_error &exception)
b20a6524
MM
2943 {
2944 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2945
a70b8144 2946 try
b20a6524 2947 {
95a6b0a1 2948 execute_command ("target record-btrace", from_tty);
b20a6524 2949 }
230d2906 2950 catch (const gdb_exception &ex)
b20a6524
MM
2951 {
2952 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2953 throw;
b20a6524 2954 }
b20a6524 2955 }
b20a6524
MM
2956}
2957
67b5c0c1
MM
2958/* The "show record btrace replay-memory-access" command. */
2959
2960static void
2961cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2962 struct cmd_list_element *c, const char *value)
2963{
6cb06a8c
TT
2964 gdb_printf (file, _("Replay memory access is %s.\n"),
2965 replay_memory_access);
67b5c0c1
MM
2966}
2967
4a4495d6
MM
2968/* The "set record btrace cpu none" command. */
2969
2970static void
2971cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2972{
2973 if (args != nullptr && *args != 0)
2974 error (_("Trailing junk: '%s'."), args);
2975
2976 record_btrace_cpu_state = CS_NONE;
2977}
2978
2979/* The "set record btrace cpu auto" command. */
2980
2981static void
2982cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2983{
2984 if (args != nullptr && *args != 0)
2985 error (_("Trailing junk: '%s'."), args);
2986
2987 record_btrace_cpu_state = CS_AUTO;
2988}
2989
2990/* The "set record btrace cpu" command. */
2991
2992static void
2993cmd_set_record_btrace_cpu (const char *args, int from_tty)
2994{
2995 if (args == nullptr)
2996 args = "";
2997
2998 /* We use a hard-coded vendor string for now. */
2999 unsigned int family, model, stepping;
3000 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3001 &model, &l1, &stepping, &l2);
3002 if (matches == 3)
3003 {
3004 if (strlen (args) != l2)
3005 error (_("Trailing junk: '%s'."), args + l2);
3006 }
3007 else if (matches == 2)
3008 {
3009 if (strlen (args) != l1)
3010 error (_("Trailing junk: '%s'."), args + l1);
3011
3012 stepping = 0;
3013 }
3014 else
3015 error (_("Bad format. See \"help set record btrace cpu\"."));
3016
3017 if (USHRT_MAX < family)
3018 error (_("Cpu family too big."));
3019
3020 if (UCHAR_MAX < model)
3021 error (_("Cpu model too big."));
3022
3023 if (UCHAR_MAX < stepping)
3024 error (_("Cpu stepping too big."));
3025
3026 record_btrace_cpu.vendor = CV_INTEL;
3027 record_btrace_cpu.family = family;
3028 record_btrace_cpu.model = model;
3029 record_btrace_cpu.stepping = stepping;
3030
3031 record_btrace_cpu_state = CS_CPU;
3032}
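/* Editor's example session (illustrative; the family/model/stepping values
   are made up):

     (gdb) set record btrace cpu intel: 6/158/9
     (gdb) show record btrace cpu
     btrace cpu is 'intel: 6/158/9'.
     (gdb) set record btrace cpu auto    # use the cpu the trace came from
     (gdb) set record btrace cpu none    # disable errata workarounds

   Stepping is optional ("intel: 6/158"); anything after the parsed fields
   is rejected as trailing junk.  */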
3033
3034/* The "show record btrace cpu" command. */
3035
3036static void
3037cmd_show_record_btrace_cpu (const char *args, int from_tty)
3038{
4a4495d6
MM
3039 if (args != nullptr && *args != 0)
3040 error (_("Trailing junk: '%s'."), args);
3041
3042 switch (record_btrace_cpu_state)
3043 {
3044 case CS_AUTO:
6cb06a8c 3045 gdb_printf (_("btrace cpu is 'auto'.\n"));
4a4495d6
MM
3046 return;
3047
3048 case CS_NONE:
6cb06a8c 3049 gdb_printf (_("btrace cpu is 'none'.\n"));
4a4495d6
MM
3050 return;
3051
3052 case CS_CPU:
3053 switch (record_btrace_cpu.vendor)
3054 {
3055 case CV_INTEL:
3056 if (record_btrace_cpu.stepping == 0)
6cb06a8c
TT
3057 gdb_printf (_("btrace cpu is 'intel: %u/%u'.\n"),
3058 record_btrace_cpu.family,
3059 record_btrace_cpu.model);
4a4495d6 3060 else
6cb06a8c
TT
3061 gdb_printf (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3062 record_btrace_cpu.family,
3063 record_btrace_cpu.model,
3064 record_btrace_cpu.stepping);
4a4495d6
MM
3065 return;
3066 }
3067 }
3068
3069 error (_("Internal error: bad cpu state."));
3070}
3071
b20a6524
MM
3072/* The "record bts buffer-size" show value function. */
3073
3074static void
3075show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3076 struct cmd_list_element *c,
3077 const char *value)
3078{
6cb06a8c
TT
3079 gdb_printf (file, _("The record/replay bts buffer size is %s.\n"),
3080 value);
b20a6524
MM
3081}
3082
3083/* The "record pt buffer-size" show value function. */
3084
3085static void
3086show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3087 struct cmd_list_element *c,
3088 const char *value)
3089{
6cb06a8c
TT
3090 gdb_printf (file, _("The record/replay pt buffer size is %s.\n"),
3091 value);
b20a6524
MM
3092}
3093
afedecd3
MM
3094/* Initialize btrace commands. */
3095
6c265988 3096void _initialize_record_btrace ();
afedecd3 3097void
6c265988 3098_initialize_record_btrace ()
afedecd3 3099{
5e84b7ee
SM
3100 cmd_list_element *record_btrace_cmd
3101 = add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3102 _("Start branch trace recording."),
3103 &record_btrace_cmdlist, 0, &record_cmdlist);
3104 add_alias_cmd ("b", record_btrace_cmd, class_obscure, 1, &record_cmdlist);
3105
3106 cmd_list_element *record_btrace_bts_cmd
3107 = add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3108 _("\
f4abbc16
MM
3109Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3110The processor stores a from/to record for each branch into a cyclic buffer.\n\
3111This format may not be available on all processors."),
5e84b7ee
SM
3112 &record_btrace_cmdlist);
3113 add_alias_cmd ("bts", record_btrace_bts_cmd, class_obscure, 1,
3114 &record_cmdlist);
f4abbc16 3115
5e84b7ee
SM
3116 cmd_list_element *record_btrace_pt_cmd
3117 = add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3118 _("\
bc504a31 3119Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524 3120This format may not be available on all processors."),
5e84b7ee
SM
3121 &record_btrace_cmdlist);
3122 add_alias_cmd ("pt", record_btrace_pt_cmd, class_obscure, 1, &record_cmdlist);
b20a6524 3123
f54bdb6d
SM
3124 add_setshow_prefix_cmd ("btrace", class_support,
3125 _("Set record options."),
3126 _("Show record options."),
3127 &set_record_btrace_cmdlist,
3128 &show_record_btrace_cmdlist,
3129 &set_record_cmdlist, &show_record_cmdlist);
67b5c0c1
MM
3130
3131 add_setshow_enum_cmd ("replay-memory-access", no_class,
3132 replay_memory_access_types, &replay_memory_access, _("\
3133Set what memory accesses are allowed during replay."), _("\
3134Show what memory accesses are allowed during replay."),
3135 _("Default is READ-ONLY.\n\n\
3136The btrace record target does not trace data.\n\
3137The memory therefore corresponds to the live target and not \
3138to the current replay position.\n\n\
3139When READ-ONLY, allow accesses to read-only memory during replay.\n\
3140When READ-WRITE, allow accesses to read-only and read-write memory during \
3141replay."),
3142 NULL, cmd_show_replay_memory_access,
3143 &set_record_btrace_cmdlist,
3144 &show_record_btrace_cmdlist);
3145
4a4495d6
MM
3146 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3147 _("\
3148Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3149The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3150For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3151When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3152The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3153When GDB does not support that cpu, this option can be used to enable\n\
3154workarounds for a similar cpu that GDB supports.\n\n\
3155When set to \"none\", errata workarounds are disabled."),
3156 &set_record_btrace_cpu_cmdlist,
2f822da5 3157 1,
4a4495d6
MM
3158 &set_record_btrace_cmdlist);
3159
3160 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3161Automatically determine the cpu to be used for trace decode."),
3162 &set_record_btrace_cpu_cmdlist);
3163
3164 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3165Do not enable errata workarounds for trace decode."),
3166 &set_record_btrace_cpu_cmdlist);
3167
3168 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3169Show the cpu to be used for trace decode."),
3170 &show_record_btrace_cmdlist);
3171
f54bdb6d
SM
3172 add_setshow_prefix_cmd ("bts", class_support,
3173 _("Set record btrace bts options."),
3174 _("Show record btrace bts options."),
3175 &set_record_btrace_bts_cmdlist,
3176 &show_record_btrace_bts_cmdlist,
3177 &set_record_btrace_cmdlist,
3178 &show_record_btrace_cmdlist);
d33501a5
MM
3179
3180 add_setshow_uinteger_cmd ("buffer-size", no_class,
3181 &record_btrace_conf.bts.size,
3182 _("Set the record/replay bts buffer size."),
3183 _("Show the record/replay bts buffer size."), _("\
 3184When starting recording, request a trace buffer of this size. \
3185The actual buffer size may differ from the requested size. \
3186Use \"info record\" to see the actual buffer size.\n\n\
3187Bigger buffers allow longer recording but also take more time to process \
3188the recorded execution trace.\n\n\
b20a6524
MM
3189The trace buffer size may not be changed while recording."), NULL,
3190 show_record_bts_buffer_size_value,
d33501a5
MM
3191 &set_record_btrace_bts_cmdlist,
3192 &show_record_btrace_bts_cmdlist);
3193
f54bdb6d
SM
3194 add_setshow_prefix_cmd ("pt", class_support,
3195 _("Set record btrace pt options."),
3196 _("Show record btrace pt options."),
3197 &set_record_btrace_pt_cmdlist,
3198 &show_record_btrace_pt_cmdlist,
3199 &set_record_btrace_cmdlist,
3200 &show_record_btrace_cmdlist);
b20a6524
MM
3201
3202 add_setshow_uinteger_cmd ("buffer-size", no_class,
3203 &record_btrace_conf.pt.size,
3204 _("Set the record/replay pt buffer size."),
3205 _("Show the record/replay pt buffer size."), _("\
3206Bigger buffers allow longer recording but also take more time to process \
3207the recorded execution.\n\
3208The actual buffer size may differ from the requested size. Use \"info record\" \
3209to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3210 &set_record_btrace_pt_cmdlist,
3211 &show_record_btrace_pt_cmdlist);
3212
d9f719f1 3213 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3214
3215 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3216 xcalloc, xfree);
d33501a5
MM
3217
3218 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3219 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3220}
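/* Editor's configuration sketch (illustrative, not part of the original
   source; the sizes are examples only):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) set record btrace pt buffer-size 65536
     (gdb) set record btrace replay-memory-access read-write
     (gdb) info record

   "info record" shows the buffer size actually obtained, which may differ
   from the requested size; the defaults set above are 64 KiB for bts and
   16 KiB for pt.  */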