]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
Replace most calls to help_list and cmd_show_list
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
b811d2c2 3 Copyright (C) 2013-2020 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
400b5eca 39#include "gdbsupport/event-loop.h"
70ad5bff 40#include "inf-loop.h"
00431a78 41#include "inferior.h"
325fac50 42#include <algorithm>
0d12e84c 43#include "gdbarch.h"
e43b10e1 44#include "cli/cli-style.h"
93b54c8e 45#include "async-event.h"
afedecd3 46
/* Static metadata describing the "record-btrace" target: its name and the
   short/long help strings shown by "help target".  Returned by the
   target's info () method below.  */

static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};
52
/* The target_ops of record-btrace.

   This target sits at the record stratum on top of the execution target.
   Methods defined inline below simply forward to the generic record
   helpers; the remaining methods are implemented further down in this
   file.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  /* Browsing the recorded instruction and function-call history.  */
  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  /* Replay control.  */
  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;

  /* Navigation within the recorded execution history.  */
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};
141
/* The single instance of the record-btrace target, pushed onto the target
   stack by record_btrace_push_target.  */
static record_btrace_target record_btrace_ops;

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Defaults to read-only,
   i.e. writes to memory are refused while replaying.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds for "set record btrace cpu".  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,	/* Use the cpu on which the trace was recorded.  */
  CS_NONE,	/* Do not apply any cpu-specific errata workarounds.  */
  CS_CPU	/* Use the explicitly configured cpu below.  */
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  Only valid if the state is CS_NONE or
   CS_CPU; see record_btrace_get_cpu.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  Only prints when "set debug
   record" is enabled (record_debug != 0).  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
219
4a4495d6
MM
220/* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222const struct btrace_cpu *
223record_btrace_get_cpu (void)
224{
225 switch (record_btrace_cpu_state)
226 {
227 case CS_AUTO:
228 return nullptr;
229
230 case CS_NONE:
231 record_btrace_cpu.vendor = CV_UNKNOWN;
232 /* Fall through. */
233 case CS_CPU:
234 return &record_btrace_cpu;
235 }
236
237 error (_("Internal error: bad record btrace cpu state."));
238}
239
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  /* Ensure it is safe to read registers; fetching the trace may need the
     current PC.  */
  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
265
266/* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
268
269 Throws an error if there is no thread or no trace. This function never
270 returns NULL. */
271
272static struct btrace_thread_info *
273require_btrace (void)
274{
275 struct thread_info *tp;
276
277 tp = require_btrace_thread ();
278
279 return &tp->btrace;
afedecd3
MM
280}
281
282/* Enable branch tracing for one thread. Warn on errors. */
283
284static void
285record_btrace_enable_warn (struct thread_info *tp)
286{
a70b8144 287 try
492d29ea
PA
288 {
289 btrace_enable (tp, &record_btrace_conf);
290 }
230d2906 291 catch (const gdb_exception_error &error)
492d29ea 292 {
3d6e9d23 293 warning ("%s", error.what ());
492d29ea 294 }
afedecd3
MM
295}
296
/* Enable automatic tracing of new threads.

   Attaches a new-thread observer that enables branch tracing for every
   thread that comes into existence while recording is active.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}
307
/* Disable automatic tracing of new threads.

   Detaching via the token is safe even if the observer was never
   attached.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}
317
/* The record-btrace async event handler function.

   Invoked from the event loop when the handler has been marked; forwards
   to the generic inferior event handling.  DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
325
c0272db5
TW
326/* See record-btrace.h. */
327
328void
329record_btrace_push_target (void)
330{
331 const char *format;
332
333 record_btrace_auto_enable ();
334
335 push_target (&record_btrace_ops);
336
337 record_btrace_async_inferior_event_handler
338 = create_async_event_handler (record_btrace_handle_async_inferior_event,
339 NULL);
340 record_btrace_generating_corefile = 0;
341
342 format = btrace_format_short_string (record_btrace_conf.format);
76727919 343 gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
c0272db5
TW
344}
345
/* Disable btrace on a set of threads on scope exit.

   Used to roll back partially successful "record btrace" invocations:
   threads are registered as tracing is enabled for them, and all of them
   are disabled again if the scope is left without calling discard.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  /* Register THREAD for disabling on destruction.  */
  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  /* Commit: keep tracing enabled for all registered threads.  */
  void discard ()
  {
    m_threads.clear ();
  }

private:
  /* The threads whose tracing will be disabled on destruction.  */
  std::forward_list<thread_info *> m_threads;
};
373
/* Open target record-btrace.

   ARGS optionally restricts recording to a list of thread numbers; an
   empty or NULL ARGS enables recording for all non-exited threads.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	/* btrace_enable throws on failure, triggering the rollback.  */
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  /* Everything succeeded; keep tracing enabled.  */
  btrace_disable.discard ();
}
402
f6ac5f3d 403/* The stop_recording method of target record-btrace. */
afedecd3 404
f6ac5f3d
PA
405void
406record_btrace_target::stop_recording ()
afedecd3 407{
afedecd3
MM
408 DEBUG ("stop recording");
409
410 record_btrace_auto_disable ();
411
08036331 412 for (thread_info *tp : all_non_exited_threads ())
afedecd3
MM
413 if (tp->btrace.target != NULL)
414 btrace_disable (tp);
415}
416
/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  /* Grab the target beneath before unpushing ourselves off the stack.  */
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}
431
/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}
449
f6ac5f3d 450/* The async method of target record-btrace. */
b7d2e916 451
f6ac5f3d
PA
452void
453record_btrace_target::async (int enable)
b7d2e916 454{
6a3753b3 455 if (enable)
b7d2e916
PA
456 mark_async_event_handler (record_btrace_async_inferior_event_handler);
457 else
458 clear_async_event_handler (record_btrace_async_inferior_event_handler);
459
b6a8c27b 460 this->beneath ()->async (enable);
b7d2e916
PA
461}
462
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit (GiB, MiB, KiB), scale
   it down to that unit and return the matching suffix; otherwise leave
   *SIZE untouched and return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  const unsigned int sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }

  if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }

  if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }

  return "";
}
490
491/* Print a BTS configuration. */
492
493static void
494record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
495{
496 const char *suffix;
497 unsigned int size;
498
499 size = conf->size;
500 if (size > 0)
501 {
502 suffix = record_btrace_adjust_size (&size);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
504 }
505}
506
bc504a31 507/* Print an Intel Processor Trace configuration. */
b20a6524
MM
508
509static void
510record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
511{
512 const char *suffix;
513 unsigned int size;
514
515 size = conf->size;
516 if (size > 0)
517 {
518 suffix = record_btrace_adjust_size (&size);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
520 }
521}
522
/* Print a branch tracing configuration: the recording format and the
   format-specific parameters.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  /* All known formats return above; reaching this is a programming
     error.  */
  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
547
/* The info_record method of target record-btrace.

   Prints the recording configuration and statistics (instructions,
   functions, gaps) for the current thread, plus the replay position if
   replaying.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  /* "::" disambiguates the global function from this method's class.  */
  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count functions from the last recorded call.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
608
/* Print a decode error for ERRCODE in FORMAT to UIOUT.

   The output looks like "[decode error (N): <message>]".  For positive
   Intel PT codes the error prefix is suppressed, as those are
   notifications rather than errors.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}
628
/* A range of source lines.  The range is half-open: [begin, end).  An
   empty range is represented by end <= begin.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
642
643/* Construct a line range. */
644
645static struct btrace_line_range
646btrace_mk_line_range (struct symtab *symtab, int begin, int end)
647{
648 struct btrace_line_range range;
649
650 range.symtab = symtab;
651 range.begin = begin;
652 range.end = end;
653
654 return range;
655}
656
657/* Add a line to a line range. */
658
659static struct btrace_line_range
660btrace_line_range_add (struct btrace_line_range range, int line)
661{
662 if (range.end <= range.begin)
663 {
664 /* This is the first entry. */
665 range.begin = line;
666 range.end = line + 1;
667 }
668 else if (line < range.begin)
669 range.begin = line;
670 else if (range.end < line)
671 range.end = line;
672
673 return range;
674}
675
676/* Return non-zero if RANGE is empty, zero otherwise. */
677
678static int
679btrace_line_range_is_empty (struct btrace_line_range range)
680{
681 return range.end <= range.begin;
682}
683
684/* Return non-zero if LHS contains RHS, zero otherwise. */
685
686static int
687btrace_line_range_contains_range (struct btrace_line_range lhs,
688 struct btrace_line_range rhs)
689{
690 return ((lhs.symtab == rhs.symtab)
691 && (lhs.begin <= rhs.begin)
692 && (rhs.end <= lhs.end));
693}
694
/* Find the line range associated with PC.  Returns an empty range if PC
   cannot be mapped to a symtab or line table.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behaviour as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
	  && (lines[i].is_stmt == 1))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
737
/* Print source lines in LINES to UIOUT.

   SRC_AND_ASM_TUPLE and ASM_LIST are optional emitters for the last
   printed source line and the instructions corresponding to it.  When
   printing a new source line, the previous emitters are closed and new
   ones are opened.  If the source line range in LINES is not empty, this
   function will leave the emitters for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      /* Close the previous instruction list before opening a new source
	 line tuple; order matters for well-nested MI output.  */
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      asm_list->emplace (uiout, "line_asm_insn");
    }
}
769
/* Disassemble a section of the recorded instruction trace.

   Prints the instructions in [BEGIN, END) from BTINFO to UIOUT,
   interleaving source lines when DISASSEMBLY_SOURCE is set in FLAGS and
   rendering trace gaps as decode-error annotations.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  /* Lazily opened emitters for the current source line; see
     btrace_print_lines.  */
  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}
859
/* The insn_history method of target record-btrace.

   Prints SIZE instructions relative to the current browsing position: a
   negative SIZE moves backwards, a positive SIZE forwards.  The browsing
   position is remembered in the thread's insn_history.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from where the previous history command stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the browsing position for the next history command.  */
  btrace_set_insn_history (btinfo, &begin, &end);
}
942
/* The insn_history_range method of target record-btrace.

   Prints the recorded instructions with numbers in [FROM, TO], both
   inclusive.  Errors out on empty or out-of-bounds ranges; a TO beyond
   the end of the trace is silently truncated.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int while
     the arguments are ULONGEST.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}
990
/* The insn_history_from method of target record-btrace.

   Prints SIZE instructions ending at (negative SIZE) or starting at
   (positive SIZE) instruction number FROM, clamping at the trace
   boundaries.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace instead of underflowing.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
1024
/* Print the instruction number range for a function call history line.

   Emits "begin,end" (both inclusive) for the instructions belonging to
   BFUN.  BFUN must contain at least one instruction.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}
1043
ce0dfbea
MM
1044/* Compute the lowest and highest source line for the instructions in BFUN
1045 and return them in PBEGIN and PEND.
1046 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1047 result from inlining or macro expansion. */
1048
1049static void
1050btrace_compute_src_line_range (const struct btrace_function *bfun,
1051 int *pbegin, int *pend)
1052{
ce0dfbea
MM
1053 struct symtab *symtab;
1054 struct symbol *sym;
ce0dfbea
MM
1055 int begin, end;
1056
1057 begin = INT_MAX;
1058 end = INT_MIN;
1059
1060 sym = bfun->sym;
1061 if (sym == NULL)
1062 goto out;
1063
1064 symtab = symbol_symtab (sym);
1065
0860c437 1066 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1067 {
1068 struct symtab_and_line sal;
1069
0860c437 1070 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1071 if (sal.symtab != symtab || sal.line == 0)
1072 continue;
1073
325fac50
PA
1074 begin = std::min (begin, sal.line);
1075 end = std::max (end, sal.line);
ce0dfbea
MM
1076 }
1077
1078 out:
1079 *pbegin = begin;
1080 *pend = end;
1081}
1082
afedecd3
MM
1083/* Print the source line information for a function call history line. */
1084
1085static void
23a7fe75
MM
1086btrace_call_history_src_line (struct ui_out *uiout,
1087 const struct btrace_function *bfun)
afedecd3
MM
1088{
1089 struct symbol *sym;
23a7fe75 1090 int begin, end;
afedecd3
MM
1091
1092 sym = bfun->sym;
1093 if (sym == NULL)
1094 return;
1095
112e8700 1096 uiout->field_string ("file",
cbe56571 1097 symtab_to_filename_for_display (symbol_symtab (sym)),
e43b10e1 1098 file_name_style.style ());
afedecd3 1099
ce0dfbea 1100 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1101 if (end < begin)
afedecd3
MM
1102 return;
1103
112e8700 1104 uiout->text (":");
381befee 1105 uiout->field_signed ("min line", begin);
afedecd3 1106
23a7fe75 1107 if (end == begin)
afedecd3
MM
1108 return;
1109
112e8700 1110 uiout->text (",");
381befee 1111 uiout->field_signed ("max line", end);
afedecd3
MM
1112}
1113
0b722aec
MM
1114/* Get the name of a branch trace function. */
1115
1116static const char *
1117btrace_get_bfun_name (const struct btrace_function *bfun)
1118{
1119 struct minimal_symbol *msym;
1120 struct symbol *sym;
1121
1122 if (bfun == NULL)
1123 return "??";
1124
1125 msym = bfun->msym;
1126 sym = bfun->sym;
1127
1128 if (sym != NULL)
987012b8 1129 return sym->print_name ();
0b722aec 1130 else if (msym != NULL)
c9d95fa3 1131 return msym->print_name ();
0b722aec
MM
1132 else
1133 return "??";
1134}
1135
afedecd3
MM
1136/* Disassemble a section of the recorded function trace. */
1137
1138static void
23a7fe75 1139btrace_call_history (struct ui_out *uiout,
8710b709 1140 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1141 const struct btrace_call_iterator *begin,
1142 const struct btrace_call_iterator *end,
8d297bbf 1143 int int_flags)
afedecd3 1144{
23a7fe75 1145 struct btrace_call_iterator it;
8d297bbf 1146 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1147
8d297bbf 1148 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1149 btrace_call_number (end));
afedecd3 1150
23a7fe75 1151 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1152 {
23a7fe75
MM
1153 const struct btrace_function *bfun;
1154 struct minimal_symbol *msym;
1155 struct symbol *sym;
1156
1157 bfun = btrace_call_get (&it);
23a7fe75 1158 sym = bfun->sym;
0b722aec 1159 msym = bfun->msym;
23a7fe75 1160
afedecd3 1161 /* Print the function index. */
1f77b012 1162 uiout->field_unsigned ("index", bfun->number);
112e8700 1163 uiout->text ("\t");
afedecd3 1164
31fd9caa
MM
1165 /* Indicate gaps in the trace. */
1166 if (bfun->errcode != 0)
1167 {
1168 const struct btrace_config *conf;
1169
1170 conf = btrace_conf (btinfo);
1171
1172 /* We have trace so we must have a configuration. */
1173 gdb_assert (conf != NULL);
1174
1175 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1176
1177 continue;
1178 }
1179
8710b709
MM
1180 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1181 {
1182 int level = bfun->level + btinfo->level, i;
1183
1184 for (i = 0; i < level; ++i)
112e8700 1185 uiout->text (" ");
8710b709
MM
1186 }
1187
1188 if (sym != NULL)
987012b8 1189 uiout->field_string ("function", sym->print_name (),
e43b10e1 1190 function_name_style.style ());
8710b709 1191 else if (msym != NULL)
c9d95fa3 1192 uiout->field_string ("function", msym->print_name (),
e43b10e1 1193 function_name_style.style ());
112e8700 1194 else if (!uiout->is_mi_like_p ())
cbe56571 1195 uiout->field_string ("function", "??",
e43b10e1 1196 function_name_style.style ());
8710b709 1197
1e038f67 1198 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1199 {
112e8700 1200 uiout->text (_("\tinst "));
23a7fe75 1201 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1202 }
1203
1e038f67 1204 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1205 {
112e8700 1206 uiout->text (_("\tat "));
23a7fe75 1207 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1208 }
1209
112e8700 1210 uiout->text ("\n");
afedecd3
MM
1211 }
1212}
1213
f6ac5f3d 1214/* The call_history method of target record-btrace. */
afedecd3 1215
f6ac5f3d
PA
1216void
1217record_btrace_target::call_history (int size, record_print_flags flags)
afedecd3
MM
1218{
1219 struct btrace_thread_info *btinfo;
23a7fe75
MM
1220 struct btrace_call_history *history;
1221 struct btrace_call_iterator begin, end;
afedecd3 1222 struct ui_out *uiout;
23a7fe75 1223 unsigned int context, covered;
afedecd3
MM
1224
1225 uiout = current_uiout;
2e783024 1226 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1227 context = abs (size);
afedecd3
MM
1228 if (context == 0)
1229 error (_("Bad record function-call-history-size."));
1230
23a7fe75
MM
1231 btinfo = require_btrace ();
1232 history = btinfo->call_history;
1233 if (history == NULL)
afedecd3 1234 {
07bbe694 1235 struct btrace_insn_iterator *replay;
afedecd3 1236
0cb7c7b0 1237 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1238
07bbe694
MM
1239 /* If we're replaying, we start at the replay position. Otherwise, we
1240 start at the tail of the trace. */
1241 replay = btinfo->replay;
1242 if (replay != NULL)
1243 {
07bbe694 1244 begin.btinfo = btinfo;
a0f1b963 1245 begin.index = replay->call_index;
07bbe694
MM
1246 }
1247 else
1248 btrace_call_end (&begin, btinfo);
1249
1250 /* We start from here and expand in the requested direction. Then we
1251 expand in the other direction, as well, to fill up any remaining
1252 context. */
1253 end = begin;
1254 if (size < 0)
1255 {
1256 /* We want the current position covered, as well. */
1257 covered = btrace_call_next (&end, 1);
1258 covered += btrace_call_prev (&begin, context - covered);
1259 covered += btrace_call_next (&end, context - covered);
1260 }
1261 else
1262 {
1263 covered = btrace_call_next (&end, context);
1264 covered += btrace_call_prev (&begin, context- covered);
1265 }
afedecd3
MM
1266 }
1267 else
1268 {
23a7fe75
MM
1269 begin = history->begin;
1270 end = history->end;
afedecd3 1271
0cb7c7b0 1272 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1273 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1274
23a7fe75
MM
1275 if (size < 0)
1276 {
1277 end = begin;
1278 covered = btrace_call_prev (&begin, context);
1279 }
1280 else
1281 {
1282 begin = end;
1283 covered = btrace_call_next (&end, context);
1284 }
afedecd3
MM
1285 }
1286
23a7fe75 1287 if (covered > 0)
8710b709 1288 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1289 else
1290 {
1291 if (size < 0)
1292 printf_unfiltered (_("At the start of the branch trace record.\n"));
1293 else
1294 printf_unfiltered (_("At the end of the branch trace record.\n"));
1295 }
afedecd3 1296
23a7fe75 1297 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1298}
1299
f6ac5f3d 1300/* The call_history_range method of target record-btrace. */
afedecd3 1301
f6ac5f3d
PA
1302void
1303record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
1304 record_print_flags flags)
afedecd3
MM
1305{
1306 struct btrace_thread_info *btinfo;
23a7fe75 1307 struct btrace_call_iterator begin, end;
afedecd3 1308 struct ui_out *uiout;
23a7fe75
MM
1309 unsigned int low, high;
1310 int found;
afedecd3
MM
1311
1312 uiout = current_uiout;
2e783024 1313 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1314 low = from;
1315 high = to;
afedecd3 1316
0cb7c7b0 1317 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
afedecd3
MM
1318
1319 /* Check for wrap-arounds. */
23a7fe75 1320 if (low != from || high != to)
afedecd3
MM
1321 error (_("Bad range."));
1322
0688d04e 1323 if (high < low)
afedecd3
MM
1324 error (_("Bad range."));
1325
23a7fe75 1326 btinfo = require_btrace ();
afedecd3 1327
23a7fe75
MM
1328 found = btrace_find_call_by_number (&begin, btinfo, low);
1329 if (found == 0)
1330 error (_("Range out of bounds."));
afedecd3 1331
23a7fe75
MM
1332 found = btrace_find_call_by_number (&end, btinfo, high);
1333 if (found == 0)
0688d04e
MM
1334 {
1335 /* Silently truncate the range. */
1336 btrace_call_end (&end, btinfo);
1337 }
1338 else
1339 {
1340 /* We want both begin and end to be inclusive. */
1341 btrace_call_next (&end, 1);
1342 }
afedecd3 1343
8710b709 1344 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1345 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1346}
1347
f6ac5f3d 1348/* The call_history_from method of target record-btrace. */
afedecd3 1349
f6ac5f3d
PA
1350void
1351record_btrace_target::call_history_from (ULONGEST from, int size,
1352 record_print_flags flags)
afedecd3
MM
1353{
1354 ULONGEST begin, end, context;
1355
1356 context = abs (size);
0688d04e
MM
1357 if (context == 0)
1358 error (_("Bad record function-call-history-size."));
afedecd3
MM
1359
1360 if (size < 0)
1361 {
1362 end = from;
1363
1364 if (from < context)
1365 begin = 0;
1366 else
0688d04e 1367 begin = from - context + 1;
afedecd3
MM
1368 }
1369 else
1370 {
1371 begin = from;
0688d04e 1372 end = from + context - 1;
afedecd3
MM
1373
1374 /* Check for wrap-around. */
1375 if (end < begin)
1376 end = ULONGEST_MAX;
1377 }
1378
f6ac5f3d 1379 call_history_range ( begin, end, flags);
afedecd3
MM
1380}
1381
f6ac5f3d 1382/* The record_method method of target record-btrace. */
b158a20f 1383
f6ac5f3d
PA
1384enum record_method
1385record_btrace_target::record_method (ptid_t ptid)
b158a20f 1386{
5b6d1e4f
PA
1387 process_stratum_target *proc_target = current_inferior ()->process_target ();
1388 thread_info *const tp = find_thread_ptid (proc_target, ptid);
b158a20f
TW
1389
1390 if (tp == NULL)
1391 error (_("No thread."));
1392
1393 if (tp->btrace.target == NULL)
1394 return RECORD_METHOD_NONE;
1395
1396 return RECORD_METHOD_BTRACE;
1397}
1398
f6ac5f3d 1399/* The record_is_replaying method of target record-btrace. */
07bbe694 1400
57810aa7 1401bool
f6ac5f3d 1402record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1403{
5b6d1e4f
PA
1404 process_stratum_target *proc_target = current_inferior ()->process_target ();
1405 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331 1406 if (btrace_is_replaying (tp))
57810aa7 1407 return true;
07bbe694 1408
57810aa7 1409 return false;
07bbe694
MM
1410}
1411
f6ac5f3d 1412/* The record_will_replay method of target record-btrace. */
7ff27e9b 1413
57810aa7 1414bool
f6ac5f3d 1415record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1416{
f6ac5f3d 1417 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1418}
1419
f6ac5f3d 1420/* The xfer_partial method of target record-btrace. */
633785ff 1421
f6ac5f3d
PA
1422enum target_xfer_status
1423record_btrace_target::xfer_partial (enum target_object object,
1424 const char *annex, gdb_byte *readbuf,
1425 const gdb_byte *writebuf, ULONGEST offset,
1426 ULONGEST len, ULONGEST *xfered_len)
633785ff 1427{
633785ff 1428 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1429 if (replay_memory_access == replay_memory_access_read_only
aef92902 1430 && !record_btrace_generating_corefile
f6ac5f3d 1431 && record_is_replaying (inferior_ptid))
633785ff
MM
1432 {
1433 switch (object)
1434 {
1435 case TARGET_OBJECT_MEMORY:
1436 {
1437 struct target_section *section;
1438
1439 /* We do not allow writing memory in general. */
1440 if (writebuf != NULL)
9b409511
YQ
1441 {
1442 *xfered_len = len;
bc113b4e 1443 return TARGET_XFER_UNAVAILABLE;
9b409511 1444 }
633785ff
MM
1445
1446 /* We allow reading readonly memory. */
f6ac5f3d 1447 section = target_section_by_addr (this, offset);
633785ff
MM
1448 if (section != NULL)
1449 {
1450 /* Check if the section we found is readonly. */
fd361982 1451 if ((bfd_section_flags (section->the_bfd_section)
633785ff
MM
1452 & SEC_READONLY) != 0)
1453 {
1454 /* Truncate the request to fit into this section. */
325fac50 1455 len = std::min (len, section->endaddr - offset);
633785ff
MM
1456 break;
1457 }
1458 }
1459
9b409511 1460 *xfered_len = len;
bc113b4e 1461 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1462 }
1463 }
1464 }
1465
1466 /* Forward the request. */
b6a8c27b
PA
1467 return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
1468 offset, len, xfered_len);
633785ff
MM
1469}
1470
f6ac5f3d 1471/* The insert_breakpoint method of target record-btrace. */
633785ff 1472
f6ac5f3d
PA
1473int
1474record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
1475 struct bp_target_info *bp_tgt)
633785ff 1476{
67b5c0c1
MM
1477 const char *old;
1478 int ret;
633785ff
MM
1479
1480 /* Inserting breakpoints requires accessing memory. Allow it for the
1481 duration of this function. */
67b5c0c1
MM
1482 old = replay_memory_access;
1483 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1484
1485 ret = 0;
a70b8144 1486 try
492d29ea 1487 {
b6a8c27b 1488 ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
492d29ea 1489 }
230d2906 1490 catch (const gdb_exception &except)
492d29ea 1491 {
6c63c96a 1492 replay_memory_access = old;
eedc3f4f 1493 throw;
492d29ea 1494 }
6c63c96a 1495 replay_memory_access = old;
633785ff
MM
1496
1497 return ret;
1498}
1499
f6ac5f3d 1500/* The remove_breakpoint method of target record-btrace. */
633785ff 1501
f6ac5f3d
PA
1502int
1503record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
1504 struct bp_target_info *bp_tgt,
1505 enum remove_bp_reason reason)
633785ff 1506{
67b5c0c1
MM
1507 const char *old;
1508 int ret;
633785ff
MM
1509
1510 /* Removing breakpoints requires accessing memory. Allow it for the
1511 duration of this function. */
67b5c0c1
MM
1512 old = replay_memory_access;
1513 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1514
1515 ret = 0;
a70b8144 1516 try
492d29ea 1517 {
b6a8c27b 1518 ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
492d29ea 1519 }
230d2906 1520 catch (const gdb_exception &except)
492d29ea 1521 {
6c63c96a 1522 replay_memory_access = old;
eedc3f4f 1523 throw;
492d29ea 1524 }
6c63c96a 1525 replay_memory_access = old;
633785ff
MM
1526
1527 return ret;
1528}
1529
f6ac5f3d 1530/* The fetch_registers method of target record-btrace. */
1f3ef581 1531
f6ac5f3d
PA
1532void
1533record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
1f3ef581 1534{
5b6d1e4f 1535 thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
1f3ef581
MM
1536 gdb_assert (tp != NULL);
1537
5b6d1e4f 1538 btrace_insn_iterator *replay = tp->btrace.replay;
aef92902 1539 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1540 {
1541 const struct btrace_insn *insn;
1542 struct gdbarch *gdbarch;
1543 int pcreg;
1544
ac7936df 1545 gdbarch = regcache->arch ();
1f3ef581
MM
1546 pcreg = gdbarch_pc_regnum (gdbarch);
1547 if (pcreg < 0)
1548 return;
1549
1550 /* We can only provide the PC register. */
1551 if (regno >= 0 && regno != pcreg)
1552 return;
1553
1554 insn = btrace_insn_get (replay);
1555 gdb_assert (insn != NULL);
1556
73e1c03f 1557 regcache->raw_supply (regno, &insn->pc);
1f3ef581
MM
1558 }
1559 else
b6a8c27b 1560 this->beneath ()->fetch_registers (regcache, regno);
1f3ef581
MM
1561}
1562
f6ac5f3d 1563/* The store_registers method of target record-btrace. */
1f3ef581 1564
f6ac5f3d
PA
1565void
1566record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1567{
a52eab48 1568 if (!record_btrace_generating_corefile
222312d3 1569 && record_is_replaying (regcache->ptid ()))
4d10e986 1570 error (_("Cannot write registers while replaying."));
1f3ef581 1571
491144b5 1572 gdb_assert (may_write_registers);
1f3ef581 1573
b6a8c27b 1574 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1575}
1576
f6ac5f3d 1577/* The prepare_to_store method of target record-btrace. */
1f3ef581 1578
f6ac5f3d
PA
1579void
1580record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1581{
a52eab48 1582 if (!record_btrace_generating_corefile
222312d3 1583 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1584 return;
1585
b6a8c27b 1586 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1587}
1588
/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table, keyed by the FRAME pointer
   (see bfcache_hash and bfcache_eq below).  */

static htab_t bfcache;
1606
1607/* hash_f for htab_create_alloc of bfcache. */
1608
1609static hashval_t
1610bfcache_hash (const void *arg)
1611{
19ba03f4
SM
1612 const struct btrace_frame_cache *cache
1613 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1614
1615 return htab_hash_pointer (cache->frame);
1616}
1617
1618/* eq_f for htab_create_alloc of bfcache. */
1619
1620static int
1621bfcache_eq (const void *arg1, const void *arg2)
1622{
19ba03f4
SM
1623 const struct btrace_frame_cache *cache1
1624 = (const struct btrace_frame_cache *) arg1;
1625 const struct btrace_frame_cache *cache2
1626 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1627
1628 return cache1->frame == cache2->frame;
1629}
1630
1631/* Create a new btrace frame cache. */
1632
1633static struct btrace_frame_cache *
1634bfcache_new (struct frame_info *frame)
1635{
1636 struct btrace_frame_cache *cache;
1637 void **slot;
1638
1639 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1640 cache->frame = frame;
1641
1642 slot = htab_find_slot (bfcache, cache, INSERT);
1643 gdb_assert (*slot == NULL);
1644 *slot = cache;
1645
1646 return cache;
1647}
1648
1649/* Extract the branch trace function from a branch trace frame. */
1650
1651static const struct btrace_function *
1652btrace_get_frame_function (struct frame_info *frame)
1653{
1654 const struct btrace_frame_cache *cache;
0b722aec
MM
1655 struct btrace_frame_cache pattern;
1656 void **slot;
1657
1658 pattern.frame = frame;
1659
1660 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1661 if (slot == NULL)
1662 return NULL;
1663
19ba03f4 1664 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1665 return cache->bfun;
1666}
1667
cecac1ab
MM
1668/* Implement stop_reason method for record_btrace_frame_unwind. */
1669
1670static enum unwind_stop_reason
1671record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1672 void **this_cache)
1673{
0b722aec
MM
1674 const struct btrace_frame_cache *cache;
1675 const struct btrace_function *bfun;
1676
19ba03f4 1677 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1678 bfun = cache->bfun;
1679 gdb_assert (bfun != NULL);
1680
42bfe59e 1681 if (bfun->up == 0)
0b722aec
MM
1682 return UNWIND_UNAVAILABLE;
1683
1684 return UNWIND_NO_REASON;
cecac1ab
MM
1685}
1686
1687/* Implement this_id method for record_btrace_frame_unwind. */
1688
1689static void
1690record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1691 struct frame_id *this_id)
1692{
0b722aec
MM
1693 const struct btrace_frame_cache *cache;
1694 const struct btrace_function *bfun;
4aeb0dfc 1695 struct btrace_call_iterator it;
0b722aec
MM
1696 CORE_ADDR code, special;
1697
19ba03f4 1698 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1699
1700 bfun = cache->bfun;
1701 gdb_assert (bfun != NULL);
1702
4aeb0dfc
TW
1703 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1704 bfun = btrace_call_get (&it);
0b722aec
MM
1705
1706 code = get_frame_func (this_frame);
1707 special = bfun->number;
1708
1709 *this_id = frame_id_build_unavailable_stack_special (code, special);
1710
1711 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1712 btrace_get_bfun_name (cache->bfun),
1713 core_addr_to_string_nz (this_id->code_addr),
1714 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1715}
1716
1717/* Implement prev_register method for record_btrace_frame_unwind. */
1718
1719static struct value *
1720record_btrace_frame_prev_register (struct frame_info *this_frame,
1721 void **this_cache,
1722 int regnum)
1723{
0b722aec
MM
1724 const struct btrace_frame_cache *cache;
1725 const struct btrace_function *bfun, *caller;
42bfe59e 1726 struct btrace_call_iterator it;
0b722aec
MM
1727 struct gdbarch *gdbarch;
1728 CORE_ADDR pc;
1729 int pcreg;
1730
1731 gdbarch = get_frame_arch (this_frame);
1732 pcreg = gdbarch_pc_regnum (gdbarch);
1733 if (pcreg < 0 || regnum != pcreg)
1734 throw_error (NOT_AVAILABLE_ERROR,
1735 _("Registers are not available in btrace record history"));
1736
19ba03f4 1737 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1738 bfun = cache->bfun;
1739 gdb_assert (bfun != NULL);
1740
42bfe59e 1741 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1742 throw_error (NOT_AVAILABLE_ERROR,
1743 _("No caller in btrace record history"));
1744
42bfe59e
TW
1745 caller = btrace_call_get (&it);
1746
0b722aec 1747 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1748 pc = caller->insn.front ().pc;
0b722aec
MM
1749 else
1750 {
0860c437 1751 pc = caller->insn.back ().pc;
0b722aec
MM
1752 pc += gdb_insn_length (gdbarch, pc);
1753 }
1754
1755 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1756 btrace_get_bfun_name (bfun), bfun->level,
1757 core_addr_to_string_nz (pc));
1758
1759 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1760}
1761
1762/* Implement sniffer method for record_btrace_frame_unwind. */
1763
1764static int
1765record_btrace_frame_sniffer (const struct frame_unwind *self,
1766 struct frame_info *this_frame,
1767 void **this_cache)
1768{
0b722aec
MM
1769 const struct btrace_function *bfun;
1770 struct btrace_frame_cache *cache;
cecac1ab 1771 struct thread_info *tp;
0b722aec 1772 struct frame_info *next;
cecac1ab
MM
1773
1774 /* THIS_FRAME does not contain a reference to its thread. */
00431a78 1775 tp = inferior_thread ();
cecac1ab 1776
0b722aec
MM
1777 bfun = NULL;
1778 next = get_next_frame (this_frame);
1779 if (next == NULL)
1780 {
1781 const struct btrace_insn_iterator *replay;
1782
1783 replay = tp->btrace.replay;
1784 if (replay != NULL)
08c3f6d2 1785 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1786 }
1787 else
1788 {
1789 const struct btrace_function *callee;
42bfe59e 1790 struct btrace_call_iterator it;
0b722aec
MM
1791
1792 callee = btrace_get_frame_function (next);
42bfe59e
TW
1793 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1794 return 0;
1795
1796 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1797 return 0;
1798
1799 bfun = btrace_call_get (&it);
0b722aec
MM
1800 }
1801
1802 if (bfun == NULL)
1803 return 0;
1804
1805 DEBUG ("[frame] sniffed frame for %s on level %d",
1806 btrace_get_bfun_name (bfun), bfun->level);
1807
1808 /* This is our frame. Initialize the frame cache. */
1809 cache = bfcache_new (this_frame);
1810 cache->tp = tp;
1811 cache->bfun = bfun;
1812
1813 *this_cache = cache;
1814 return 1;
1815}
1816
1817/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1818
1819static int
1820record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1821 struct frame_info *this_frame,
1822 void **this_cache)
1823{
1824 const struct btrace_function *bfun, *callee;
1825 struct btrace_frame_cache *cache;
42bfe59e 1826 struct btrace_call_iterator it;
0b722aec 1827 struct frame_info *next;
42bfe59e 1828 struct thread_info *tinfo;
0b722aec
MM
1829
1830 next = get_next_frame (this_frame);
1831 if (next == NULL)
1832 return 0;
1833
1834 callee = btrace_get_frame_function (next);
1835 if (callee == NULL)
1836 return 0;
1837
1838 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1839 return 0;
1840
00431a78 1841 tinfo = inferior_thread ();
42bfe59e 1842 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1843 return 0;
1844
42bfe59e
TW
1845 bfun = btrace_call_get (&it);
1846
0b722aec
MM
1847 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1848 btrace_get_bfun_name (bfun), bfun->level);
1849
1850 /* This is our frame. Initialize the frame cache. */
1851 cache = bfcache_new (this_frame);
42bfe59e 1852 cache->tp = tinfo;
0b722aec
MM
1853 cache->bfun = bfun;
1854
1855 *this_cache = cache;
1856 return 1;
1857}
1858
1859static void
1860record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1861{
1862 struct btrace_frame_cache *cache;
1863 void **slot;
1864
19ba03f4 1865 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1866
1867 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1868 gdb_assert (slot != NULL);
1869
1870 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1871}
1872
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Like record_btrace_frame_unwind, but for tail-call frames (segments whose
   callee was entered via a tail call).  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1900
/* Implement the get_unwinder method.  Returns the btrace unwinder for
   normal frames.  */

const struct frame_unwind *
record_btrace_target::get_unwinder ()
{
  return &record_btrace_frame_unwind;
}
1908
/* Implement the get_tailcall_unwinder method.  Returns the btrace unwinder
   for tail-call frames.  */

const struct frame_unwind *
record_btrace_target::get_tailcall_unwinder ()
{
  return &record_btrace_tailcall_frame_unwind;
}
1916
/* Return a human-readable string for FLAG, for debug output.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  /* Combined or unknown flag values.  */
  return "<invalid>";
}
1942
52834460
MM
1943/* Indicate that TP should be resumed according to FLAG. */
1944
1945static void
1946record_btrace_resume_thread (struct thread_info *tp,
1947 enum btrace_thread_flag flag)
1948{
1949 struct btrace_thread_info *btinfo;
1950
43792cf0 1951 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d
TT
1952 target_pid_to_str (tp->ptid).c_str (), flag,
1953 btrace_thread_flag_to_str (flag));
52834460
MM
1954
1955 btinfo = &tp->btrace;
1956
52834460 1957 /* Fetch the latest branch trace. */
4a4495d6 1958 btrace_fetch (tp, record_btrace_get_cpu ());
52834460 1959
0ca912df
MM
1960 /* A resume request overwrites a preceding resume or stop request. */
1961 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1962 btinfo->flags |= flag;
1963}
1964
/* Get the frame id of the current frame for TP.  Temporarily switches to
   TP and clears its executing flag so the frame can be computed.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  bool executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (proc_target, inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (proc_target, inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (proc_target, inferior_ptid, executing);

  return id;
}
2009
/* Start replaying a thread.  Returns the new replay iterator (also stored
   in TP's btrace info), or NULL if TP has no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detects steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Undo a partially-started replay before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2087
2088/* Stop replaying a thread. */
2089
2090static void
2091record_btrace_stop_replaying (struct thread_info *tp)
2092{
2093 struct btrace_thread_info *btinfo;
2094
2095 btinfo = &tp->btrace;
2096
2097 xfree (btinfo->replay);
2098 btinfo->replay = NULL;
2099
2100 /* Make sure we're not leaving any stale registers. */
00431a78 2101 registers_changed_thread (tp);
52834460
MM
2102}
2103
e3cfc1c7
MM
2104/* Stop replaying TP if it is at the end of its execution history. */
2105
2106static void
2107record_btrace_stop_replaying_at_end (struct thread_info *tp)
2108{
2109 struct btrace_insn_iterator *replay, end;
2110 struct btrace_thread_info *btinfo;
2111
2112 btinfo = &tp->btrace;
2113 replay = btinfo->replay;
2114
2115 if (replay == NULL)
2116 return;
2117
2118 btrace_insn_end (&end, btinfo);
2119
2120 if (btrace_insn_cmp (replay, &end) == 0)
2121 record_btrace_stop_replaying (tp);
2122}
2123
f6ac5f3d 2124/* The resume method of target record-btrace. */
b2f4cfde 2125
f6ac5f3d
PA
2126void
2127record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
b2f4cfde 2128{
d2939ba2 2129 enum btrace_thread_flag flag, cflag;
52834460 2130
a068643d 2131 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
f6ac5f3d 2132 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
987e68b1 2133 step ? "step" : "cont");
52834460 2134
0ca912df
MM
2135 /* Store the execution direction of the last resume.
2136
f6ac5f3d 2137 If there is more than one resume call, we have to rely on infrun
0ca912df 2138 to not change the execution direction in-between. */
f6ac5f3d 2139 record_btrace_resume_exec_dir = ::execution_direction;
70ad5bff 2140
0ca912df 2141 /* As long as we're not replaying, just forward the request.
52834460 2142
0ca912df
MM
2143 For non-stop targets this means that no thread is replaying. In order to
2144 make progress, we may need to explicitly move replaying threads to the end
2145 of their execution history. */
f6ac5f3d
PA
2146 if ((::execution_direction != EXEC_REVERSE)
2147 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2148 {
b6a8c27b 2149 this->beneath ()->resume (ptid, step, signal);
04c4fe8c 2150 return;
b2f4cfde
MM
2151 }
2152
52834460 2153 /* Compute the btrace thread flag for the requested move. */
f6ac5f3d 2154 if (::execution_direction == EXEC_REVERSE)
d2939ba2
MM
2155 {
2156 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2157 cflag = BTHR_RCONT;
2158 }
52834460 2159 else
d2939ba2
MM
2160 {
2161 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2162 cflag = BTHR_CONT;
2163 }
52834460 2164
52834460 2165 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2166 record_btrace_wait below.
2167
2168 For all-stop targets, we only step INFERIOR_PTID and continue others. */
5b6d1e4f
PA
2169
2170 process_stratum_target *proc_target = current_inferior ()->process_target ();
2171
d2939ba2
MM
2172 if (!target_is_non_stop_p ())
2173 {
26a57c92 2174 gdb_assert (inferior_ptid.matches (ptid));
d2939ba2 2175
5b6d1e4f 2176 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2177 {
2178 if (tp->ptid.matches (inferior_ptid))
2179 record_btrace_resume_thread (tp, flag);
2180 else
2181 record_btrace_resume_thread (tp, cflag);
2182 }
d2939ba2
MM
2183 }
2184 else
2185 {
5b6d1e4f 2186 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331 2187 record_btrace_resume_thread (tp, flag);
d2939ba2 2188 }
70ad5bff
MM
2189
2190 /* Async support. */
2191 if (target_can_async_p ())
2192 {
6a3753b3 2193 target_async (1);
70ad5bff
MM
2194 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2195 }
52834460
MM
2196}
2197
f6ac5f3d 2198/* The commit_resume method of target record-btrace. */
85ad3aaf 2199
f6ac5f3d
PA
2200void
2201record_btrace_target::commit_resume ()
85ad3aaf 2202{
f6ac5f3d
PA
2203 if ((::execution_direction != EXEC_REVERSE)
2204 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2205 beneath ()->commit_resume ();
85ad3aaf
PA
2206}
2207
987e68b1
MM
2208/* Cancel resuming TP. */
2209
2210static void
2211record_btrace_cancel_resume (struct thread_info *tp)
2212{
2213 enum btrace_thread_flag flags;
2214
2215 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2216 if (flags == 0)
2217 return;
2218
43792cf0
PA
2219 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2220 print_thread_id (tp),
a068643d 2221 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1
MM
2222 btrace_thread_flag_to_str (flags));
2223
2224 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2225 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2226}
2227
2228/* Return a target_waitstatus indicating that we ran out of history. */
2229
2230static struct target_waitstatus
2231btrace_step_no_history (void)
2232{
2233 struct target_waitstatus status;
2234
2235 status.kind = TARGET_WAITKIND_NO_HISTORY;
2236
2237 return status;
2238}
2239
2240/* Return a target_waitstatus indicating that a step finished. */
2241
2242static struct target_waitstatus
2243btrace_step_stopped (void)
2244{
2245 struct target_waitstatus status;
2246
2247 status.kind = TARGET_WAITKIND_STOPPED;
2248 status.value.sig = GDB_SIGNAL_TRAP;
2249
2250 return status;
2251}
2252
6e4879f0
MM
2253/* Return a target_waitstatus indicating that a thread was stopped as
2254 requested. */
2255
2256static struct target_waitstatus
2257btrace_step_stopped_on_request (void)
2258{
2259 struct target_waitstatus status;
2260
2261 status.kind = TARGET_WAITKIND_STOPPED;
2262 status.value.sig = GDB_SIGNAL_0;
2263
2264 return status;
2265}
2266
d825d248
MM
2267/* Return a target_waitstatus indicating a spurious stop. */
2268
2269static struct target_waitstatus
2270btrace_step_spurious (void)
2271{
2272 struct target_waitstatus status;
2273
2274 status.kind = TARGET_WAITKIND_SPURIOUS;
2275
2276 return status;
2277}
2278
e3cfc1c7
MM
2279/* Return a target_waitstatus indicating that the thread was not resumed. */
2280
2281static struct target_waitstatus
2282btrace_step_no_resumed (void)
2283{
2284 struct target_waitstatus status;
2285
2286 status.kind = TARGET_WAITKIND_NO_RESUMED;
2287
2288 return status;
2289}
2290
2291/* Return a target_waitstatus indicating that we should wait again. */
2292
2293static struct target_waitstatus
2294btrace_step_again (void)
2295{
2296 struct target_waitstatus status;
2297
2298 status.kind = TARGET_WAITKIND_IGNORE;
2299
2300 return status;
2301}
2302
52834460
MM
2303/* Clear the record histories. */
2304
2305static void
2306record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2307{
2308 xfree (btinfo->insn_history);
2309 xfree (btinfo->call_history);
2310
2311 btinfo->insn_history = NULL;
2312 btinfo->call_history = NULL;
2313}
2314
3c615f99
MM
2315/* Check whether TP's current replay position is at a breakpoint. */
2316
2317static int
2318record_btrace_replay_at_breakpoint (struct thread_info *tp)
2319{
2320 struct btrace_insn_iterator *replay;
2321 struct btrace_thread_info *btinfo;
2322 const struct btrace_insn *insn;
3c615f99
MM
2323
2324 btinfo = &tp->btrace;
2325 replay = btinfo->replay;
2326
2327 if (replay == NULL)
2328 return 0;
2329
2330 insn = btrace_insn_get (replay);
2331 if (insn == NULL)
2332 return 0;
2333
00431a78 2334 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2335 &btinfo->stop_reason);
2336}
2337
d825d248 2338/* Step one instruction in forward direction. */
52834460
MM
2339
2340static struct target_waitstatus
d825d248 2341record_btrace_single_step_forward (struct thread_info *tp)
52834460 2342{
b61ce85c 2343 struct btrace_insn_iterator *replay, end, start;
52834460 2344 struct btrace_thread_info *btinfo;
52834460 2345
d825d248
MM
2346 btinfo = &tp->btrace;
2347 replay = btinfo->replay;
2348
2349 /* We're done if we're not replaying. */
2350 if (replay == NULL)
2351 return btrace_step_no_history ();
2352
011c71b6
MM
2353 /* Check if we're stepping a breakpoint. */
2354 if (record_btrace_replay_at_breakpoint (tp))
2355 return btrace_step_stopped ();
2356
b61ce85c
MM
2357 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2358 jump back to the instruction at which we started. */
2359 start = *replay;
d825d248
MM
2360 do
2361 {
2362 unsigned int steps;
2363
e3cfc1c7
MM
2364 /* We will bail out here if we continue stepping after reaching the end
2365 of the execution history. */
d825d248
MM
2366 steps = btrace_insn_next (replay, 1);
2367 if (steps == 0)
b61ce85c
MM
2368 {
2369 *replay = start;
2370 return btrace_step_no_history ();
2371 }
d825d248
MM
2372 }
2373 while (btrace_insn_get (replay) == NULL);
2374
2375 /* Determine the end of the instruction trace. */
2376 btrace_insn_end (&end, btinfo);
2377
e3cfc1c7
MM
2378 /* The execution trace contains (and ends with) the current instruction.
2379 This instruction has not been executed, yet, so the trace really ends
2380 one instruction earlier. */
d825d248 2381 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2382 return btrace_step_no_history ();
d825d248
MM
2383
2384 return btrace_step_spurious ();
2385}
2386
2387/* Step one instruction in backward direction. */
2388
2389static struct target_waitstatus
2390record_btrace_single_step_backward (struct thread_info *tp)
2391{
b61ce85c 2392 struct btrace_insn_iterator *replay, start;
d825d248 2393 struct btrace_thread_info *btinfo;
e59fa00f 2394
52834460
MM
2395 btinfo = &tp->btrace;
2396 replay = btinfo->replay;
2397
d825d248
MM
2398 /* Start replaying if we're not already doing so. */
2399 if (replay == NULL)
2400 replay = record_btrace_start_replaying (tp);
2401
2402 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2403 Skip gaps during replay. If we end up at a gap (at the beginning of
2404 the trace), jump back to the instruction at which we started. */
2405 start = *replay;
d825d248
MM
2406 do
2407 {
2408 unsigned int steps;
2409
2410 steps = btrace_insn_prev (replay, 1);
2411 if (steps == 0)
b61ce85c
MM
2412 {
2413 *replay = start;
2414 return btrace_step_no_history ();
2415 }
d825d248
MM
2416 }
2417 while (btrace_insn_get (replay) == NULL);
2418
011c71b6
MM
2419 /* Check if we're stepping a breakpoint.
2420
2421 For reverse-stepping, this check is after the step. There is logic in
2422 infrun.c that handles reverse-stepping separately. See, for example,
2423 proceed and adjust_pc_after_break.
2424
2425 This code assumes that for reverse-stepping, PC points to the last
2426 de-executed instruction, whereas for forward-stepping PC points to the
2427 next to-be-executed instruction. */
2428 if (record_btrace_replay_at_breakpoint (tp))
2429 return btrace_step_stopped ();
2430
d825d248
MM
2431 return btrace_step_spurious ();
2432}
2433
2434/* Step a single thread. */
2435
2436static struct target_waitstatus
2437record_btrace_step_thread (struct thread_info *tp)
2438{
2439 struct btrace_thread_info *btinfo;
2440 struct target_waitstatus status;
2441 enum btrace_thread_flag flags;
2442
2443 btinfo = &tp->btrace;
2444
6e4879f0
MM
2445 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2446 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2447
43792cf0 2448 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
a068643d 2449 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1 2450 btrace_thread_flag_to_str (flags));
52834460 2451
6e4879f0
MM
2452 /* We can't step without an execution history. */
2453 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2454 return btrace_step_no_history ();
2455
52834460
MM
2456 switch (flags)
2457 {
2458 default:
2459 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2460
6e4879f0
MM
2461 case BTHR_STOP:
2462 return btrace_step_stopped_on_request ();
2463
52834460 2464 case BTHR_STEP:
d825d248
MM
2465 status = record_btrace_single_step_forward (tp);
2466 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2467 break;
52834460
MM
2468
2469 return btrace_step_stopped ();
2470
2471 case BTHR_RSTEP:
d825d248
MM
2472 status = record_btrace_single_step_backward (tp);
2473 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2474 break;
52834460
MM
2475
2476 return btrace_step_stopped ();
2477
2478 case BTHR_CONT:
e3cfc1c7
MM
2479 status = record_btrace_single_step_forward (tp);
2480 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2481 break;
52834460 2482
e3cfc1c7
MM
2483 btinfo->flags |= flags;
2484 return btrace_step_again ();
52834460
MM
2485
2486 case BTHR_RCONT:
e3cfc1c7
MM
2487 status = record_btrace_single_step_backward (tp);
2488 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2489 break;
52834460 2490
e3cfc1c7
MM
2491 btinfo->flags |= flags;
2492 return btrace_step_again ();
2493 }
d825d248 2494
f6ac5f3d 2495 /* We keep threads moving at the end of their execution history. The wait
e3cfc1c7
MM
2496 method will stop the thread for whom the event is reported. */
2497 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2498 btinfo->flags |= flags;
52834460 2499
e3cfc1c7 2500 return status;
b2f4cfde
MM
2501}
2502
a6b5be76
MM
2503/* Announce further events if necessary. */
2504
2505static void
53127008
SM
2506record_btrace_maybe_mark_async_event
2507 (const std::vector<thread_info *> &moving,
2508 const std::vector<thread_info *> &no_history)
a6b5be76 2509{
53127008
SM
2510 bool more_moving = !moving.empty ();
2511 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2512
2513 if (!more_moving && !more_no_history)
2514 return;
2515
2516 if (more_moving)
2517 DEBUG ("movers pending");
2518
2519 if (more_no_history)
2520 DEBUG ("no-history pending");
2521
2522 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2523}
2524
f6ac5f3d 2525/* The wait method of target record-btrace. */
b2f4cfde 2526
f6ac5f3d
PA
2527ptid_t
2528record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
2529 int options)
b2f4cfde 2530{
53127008
SM
2531 std::vector<thread_info *> moving;
2532 std::vector<thread_info *> no_history;
52834460 2533
a068643d 2534 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);
52834460 2535
b2f4cfde 2536 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2537 if ((::execution_direction != EXEC_REVERSE)
2538 && !record_is_replaying (minus_one_ptid))
b2f4cfde 2539 {
b6a8c27b 2540 return this->beneath ()->wait (ptid, status, options);
b2f4cfde
MM
2541 }
2542
e3cfc1c7 2543 /* Keep a work list of moving threads. */
5b6d1e4f
PA
2544 process_stratum_target *proc_target = current_inferior ()->process_target ();
2545 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2546 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
2547 moving.push_back (tp);
e3cfc1c7 2548
53127008 2549 if (moving.empty ())
52834460 2550 {
e3cfc1c7 2551 *status = btrace_step_no_resumed ();
52834460 2552
a068643d 2553 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
23fdd69e 2554 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2555
e3cfc1c7 2556 return null_ptid;
52834460
MM
2557 }
2558
e3cfc1c7
MM
2559 /* Step moving threads one by one, one step each, until either one thread
2560 reports an event or we run out of threads to step.
2561
2562 When stepping more than one thread, chances are that some threads reach
2563 the end of their execution history earlier than others. If we reported
2564 this immediately, all-stop on top of non-stop would stop all threads and
2565 resume the same threads next time. And we would report the same thread
2566 having reached the end of its execution history again.
2567
2568 In the worst case, this would starve the other threads. But even if other
2569 threads would be allowed to make progress, this would result in far too
2570 many intermediate stops.
2571
2572 We therefore delay the reporting of "no execution history" until we have
2573 nothing else to report. By this time, all threads should have moved to
2574 either the beginning or the end of their execution history. There will
2575 be a single user-visible stop. */
53127008
SM
2576 struct thread_info *eventing = NULL;
2577 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2578 {
53127008 2579 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2580 {
53127008
SM
2581 thread_info *tp = moving[ix];
2582
e3cfc1c7
MM
2583 *status = record_btrace_step_thread (tp);
2584
2585 switch (status->kind)
2586 {
2587 case TARGET_WAITKIND_IGNORE:
2588 ix++;
2589 break;
2590
2591 case TARGET_WAITKIND_NO_HISTORY:
53127008 2592 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2593 break;
2594
2595 default:
53127008 2596 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2597 break;
2598 }
2599 }
2600 }
2601
2602 if (eventing == NULL)
2603 {
2604 /* We started with at least one moving thread. This thread must have
2605 either stopped or reached the end of its execution history.
2606
2607 In the former case, EVENTING must not be NULL.
2608 In the latter case, NO_HISTORY must not be empty. */
53127008 2609 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2610
2611 /* We kept threads moving at the end of their execution history. Stop
2612 EVENTING now that we are going to report its stop. */
53127008 2613 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2614 eventing->btrace.flags &= ~BTHR_MOVE;
2615
2616 *status = btrace_step_no_history ();
2617 }
2618
2619 gdb_assert (eventing != NULL);
2620
2621 /* We kept threads replaying at the end of their execution history. Stop
2622 replaying EVENTING now that we are going to report its stop. */
2623 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2624
2625 /* Stop all other threads. */
5953356c 2626 if (!target_is_non_stop_p ())
53127008 2627 {
08036331 2628 for (thread_info *tp : all_non_exited_threads ())
53127008
SM
2629 record_btrace_cancel_resume (tp);
2630 }
52834460 2631
a6b5be76
MM
2632 /* In async mode, we need to announce further events. */
2633 if (target_is_async_p ())
2634 record_btrace_maybe_mark_async_event (moving, no_history);
2635
52834460 2636 /* Start record histories anew from the current position. */
e3cfc1c7 2637 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2638
2639 /* We moved the replay position but did not update registers. */
00431a78 2640 registers_changed_thread (eventing);
e3cfc1c7 2641
43792cf0
PA
2642 DEBUG ("wait ended by thread %s (%s): %s",
2643 print_thread_id (eventing),
a068643d 2644 target_pid_to_str (eventing->ptid).c_str (),
23fdd69e 2645 target_waitstatus_to_string (status).c_str ());
52834460 2646
e3cfc1c7 2647 return eventing->ptid;
52834460
MM
2648}
2649
f6ac5f3d 2650/* The stop method of target record-btrace. */
6e4879f0 2651
f6ac5f3d
PA
2652void
2653record_btrace_target::stop (ptid_t ptid)
6e4879f0 2654{
a068643d 2655 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2656
2657 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2658 if ((::execution_direction != EXEC_REVERSE)
2659 && !record_is_replaying (minus_one_ptid))
6e4879f0 2660 {
b6a8c27b 2661 this->beneath ()->stop (ptid);
6e4879f0
MM
2662 }
2663 else
2664 {
5b6d1e4f
PA
2665 process_stratum_target *proc_target
2666 = current_inferior ()->process_target ();
2667
2668 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2669 {
2670 tp->btrace.flags &= ~BTHR_MOVE;
2671 tp->btrace.flags |= BTHR_STOP;
2672 }
6e4879f0
MM
2673 }
2674 }
2675
f6ac5f3d 2676/* The can_execute_reverse method of target record-btrace. */
52834460 2677
57810aa7 2678bool
f6ac5f3d 2679record_btrace_target::can_execute_reverse ()
52834460 2680{
57810aa7 2681 return true;
52834460
MM
2682}
2683
f6ac5f3d 2684/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2685
57810aa7 2686bool
f6ac5f3d 2687record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2688{
f6ac5f3d 2689 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2690 {
2691 struct thread_info *tp = inferior_thread ();
2692
2693 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2694 }
2695
b6a8c27b 2696 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2697}
2698
f6ac5f3d 2699/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2700 record-btrace. */
2701
57810aa7 2702bool
f6ac5f3d 2703record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2704{
f6ac5f3d 2705 if (record_is_replaying (minus_one_ptid))
57810aa7 2706 return true;
9e8915c6 2707
b6a8c27b 2708 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2709}
2710
f6ac5f3d 2711/* The stopped_by_sw_breakpoint method of target record-btrace. */
9e8915c6 2712
57810aa7 2713bool
f6ac5f3d 2714record_btrace_target::stopped_by_hw_breakpoint ()
9e8915c6 2715{
f6ac5f3d 2716 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2717 {
2718 struct thread_info *tp = inferior_thread ();
2719
2720 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2721 }
2722
b6a8c27b 2723 return this->beneath ()->stopped_by_hw_breakpoint ();
9e8915c6
PA
2724}
2725
f6ac5f3d 2726/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2727 record-btrace. */
2728
57810aa7 2729bool
f6ac5f3d 2730record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2731{
f6ac5f3d 2732 if (record_is_replaying (minus_one_ptid))
57810aa7 2733 return true;
52834460 2734
b6a8c27b 2735 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2736}
2737
f6ac5f3d 2738/* The update_thread_list method of target record-btrace. */
e2887aa3 2739
f6ac5f3d
PA
2740void
2741record_btrace_target::update_thread_list ()
e2887aa3 2742{
e8032dde 2743 /* We don't add or remove threads during replay. */
f6ac5f3d 2744 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2745 return;
2746
2747 /* Forward the request. */
b6a8c27b 2748 this->beneath ()->update_thread_list ();
e2887aa3
MM
2749}
2750
f6ac5f3d 2751/* The thread_alive method of target record-btrace. */
e2887aa3 2752
57810aa7 2753bool
f6ac5f3d 2754record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2755{
2756 /* We don't add or remove threads during replay. */
f6ac5f3d 2757 if (record_is_replaying (minus_one_ptid))
00431a78 2758 return true;
e2887aa3
MM
2759
2760 /* Forward the request. */
b6a8c27b 2761 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2762}
2763
066ce621
MM
2764/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2765 is stopped. */
2766
2767static void
2768record_btrace_set_replay (struct thread_info *tp,
2769 const struct btrace_insn_iterator *it)
2770{
2771 struct btrace_thread_info *btinfo;
2772
2773 btinfo = &tp->btrace;
2774
a0f1b963 2775 if (it == NULL)
52834460 2776 record_btrace_stop_replaying (tp);
066ce621
MM
2777 else
2778 {
2779 if (btinfo->replay == NULL)
52834460 2780 record_btrace_start_replaying (tp);
066ce621
MM
2781 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2782 return;
2783
2784 *btinfo->replay = *it;
00431a78 2785 registers_changed_thread (tp);
066ce621
MM
2786 }
2787
52834460
MM
2788 /* Start anew from the new replay position. */
2789 record_btrace_clear_histories (btinfo);
485668e5 2790
f2ffa92b
PA
2791 inferior_thread ()->suspend.stop_pc
2792 = regcache_read_pc (get_current_regcache ());
485668e5 2793 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2794}
2795
f6ac5f3d 2796/* The goto_record_begin method of target record-btrace. */
066ce621 2797
f6ac5f3d
PA
2798void
2799record_btrace_target::goto_record_begin ()
066ce621
MM
2800{
2801 struct thread_info *tp;
2802 struct btrace_insn_iterator begin;
2803
2804 tp = require_btrace_thread ();
2805
2806 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2807
2808 /* Skip gaps at the beginning of the trace. */
2809 while (btrace_insn_get (&begin) == NULL)
2810 {
2811 unsigned int steps;
2812
2813 steps = btrace_insn_next (&begin, 1);
2814 if (steps == 0)
2815 error (_("No trace."));
2816 }
2817
066ce621 2818 record_btrace_set_replay (tp, &begin);
066ce621
MM
2819}
2820
f6ac5f3d 2821/* The goto_record_end method of target record-btrace. */
066ce621 2822
f6ac5f3d
PA
2823void
2824record_btrace_target::goto_record_end ()
066ce621
MM
2825{
2826 struct thread_info *tp;
2827
2828 tp = require_btrace_thread ();
2829
2830 record_btrace_set_replay (tp, NULL);
066ce621
MM
2831}
2832
f6ac5f3d 2833/* The goto_record method of target record-btrace. */
066ce621 2834
f6ac5f3d
PA
2835void
2836record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2837{
2838 struct thread_info *tp;
2839 struct btrace_insn_iterator it;
2840 unsigned int number;
2841 int found;
2842
2843 number = insn;
2844
2845 /* Check for wrap-arounds. */
2846 if (number != insn)
2847 error (_("Instruction number out of range."));
2848
2849 tp = require_btrace_thread ();
2850
2851 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2852
2853 /* Check if the instruction could not be found or is a gap. */
2854 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2855 error (_("No such instruction."));
2856
2857 record_btrace_set_replay (tp, &it);
066ce621
MM
2858}
2859
f6ac5f3d 2860/* The record_stop_replaying method of target record-btrace. */
797094dd 2861
f6ac5f3d
PA
2862void
2863record_btrace_target::record_stop_replaying ()
797094dd 2864{
08036331 2865 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2866 record_btrace_stop_replaying (tp);
2867}
2868
f6ac5f3d 2869/* The execution_direction target method. */
70ad5bff 2870
f6ac5f3d
PA
2871enum exec_direction_kind
2872record_btrace_target::execution_direction ()
70ad5bff
MM
2873{
2874 return record_btrace_resume_exec_dir;
2875}
2876
f6ac5f3d 2877/* The prepare_to_generate_core target method. */
aef92902 2878
f6ac5f3d
PA
2879void
2880record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2881{
2882 record_btrace_generating_corefile = 1;
2883}
2884
f6ac5f3d 2885/* The done_generating_core target method. */
aef92902 2886
f6ac5f3d
PA
2887void
2888record_btrace_target::done_generating_core ()
aef92902
MM
2889{
2890 record_btrace_generating_corefile = 0;
2891}
2892
f4abbc16
MM
2893/* Start recording in BTS format. */
2894
2895static void
cdb34d4a 2896cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2897{
f4abbc16
MM
2898 if (args != NULL && *args != 0)
2899 error (_("Invalid argument."));
2900
2901 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2902
a70b8144 2903 try
492d29ea 2904 {
95a6b0a1 2905 execute_command ("target record-btrace", from_tty);
492d29ea 2906 }
230d2906 2907 catch (const gdb_exception &exception)
f4abbc16
MM
2908 {
2909 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2910 throw;
f4abbc16
MM
2911 }
2912}
2913
bc504a31 2914/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2915
2916static void
cdb34d4a 2917cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2918{
2919 if (args != NULL && *args != 0)
2920 error (_("Invalid argument."));
2921
b20a6524 2922 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2923
a70b8144 2924 try
492d29ea 2925 {
95a6b0a1 2926 execute_command ("target record-btrace", from_tty);
492d29ea 2927 }
230d2906 2928 catch (const gdb_exception &exception)
492d29ea
PA
2929 {
2930 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2931 throw;
492d29ea 2932 }
afedecd3
MM
2933}
2934
b20a6524
MM
2935/* Alias for "target record". */
2936
2937static void
981a3fb3 2938cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2939{
2940 if (args != NULL && *args != 0)
2941 error (_("Invalid argument."));
2942
2943 record_btrace_conf.format = BTRACE_FORMAT_PT;
2944
a70b8144 2945 try
b20a6524 2946 {
95a6b0a1 2947 execute_command ("target record-btrace", from_tty);
b20a6524 2948 }
230d2906 2949 catch (const gdb_exception &exception)
b20a6524
MM
2950 {
2951 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2952
a70b8144 2953 try
b20a6524 2954 {
95a6b0a1 2955 execute_command ("target record-btrace", from_tty);
b20a6524 2956 }
230d2906 2957 catch (const gdb_exception &ex)
b20a6524
MM
2958 {
2959 record_btrace_conf.format = BTRACE_FORMAT_NONE;
eedc3f4f 2960 throw;
b20a6524 2961 }
b20a6524 2962 }
b20a6524
MM
2963}
2964
67b5c0c1
MM
2965/* The "show record btrace replay-memory-access" command. */
2966
2967static void
2968cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2969 struct cmd_list_element *c, const char *value)
2970{
2971 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2972 replay_memory_access);
2973}
2974
4a4495d6
MM
2975/* The "set record btrace cpu none" command. */
2976
2977static void
2978cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2979{
2980 if (args != nullptr && *args != 0)
2981 error (_("Trailing junk: '%s'."), args);
2982
2983 record_btrace_cpu_state = CS_NONE;
2984}
2985
2986/* The "set record btrace cpu auto" command. */
2987
2988static void
2989cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2990{
2991 if (args != nullptr && *args != 0)
2992 error (_("Trailing junk: '%s'."), args);
2993
2994 record_btrace_cpu_state = CS_AUTO;
2995}
2996
2997/* The "set record btrace cpu" command. */
2998
2999static void
3000cmd_set_record_btrace_cpu (const char *args, int from_tty)
3001{
3002 if (args == nullptr)
3003 args = "";
3004
3005 /* We use a hard-coded vendor string for now. */
3006 unsigned int family, model, stepping;
3007 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3008 &model, &l1, &stepping, &l2);
3009 if (matches == 3)
3010 {
3011 if (strlen (args) != l2)
3012 error (_("Trailing junk: '%s'."), args + l2);
3013 }
3014 else if (matches == 2)
3015 {
3016 if (strlen (args) != l1)
3017 error (_("Trailing junk: '%s'."), args + l1);
3018
3019 stepping = 0;
3020 }
3021 else
3022 error (_("Bad format. See \"help set record btrace cpu\"."));
3023
3024 if (USHRT_MAX < family)
3025 error (_("Cpu family too big."));
3026
3027 if (UCHAR_MAX < model)
3028 error (_("Cpu model too big."));
3029
3030 if (UCHAR_MAX < stepping)
3031 error (_("Cpu stepping too big."));
3032
3033 record_btrace_cpu.vendor = CV_INTEL;
3034 record_btrace_cpu.family = family;
3035 record_btrace_cpu.model = model;
3036 record_btrace_cpu.stepping = stepping;
3037
3038 record_btrace_cpu_state = CS_CPU;
3039}
3040
3041/* The "show record btrace cpu" command. */
3042
3043static void
3044cmd_show_record_btrace_cpu (const char *args, int from_tty)
3045{
4a4495d6
MM
3046 if (args != nullptr && *args != 0)
3047 error (_("Trailing junk: '%s'."), args);
3048
3049 switch (record_btrace_cpu_state)
3050 {
3051 case CS_AUTO:
3052 printf_unfiltered (_("btrace cpu is 'auto'.\n"));
3053 return;
3054
3055 case CS_NONE:
3056 printf_unfiltered (_("btrace cpu is 'none'.\n"));
3057 return;
3058
3059 case CS_CPU:
3060 switch (record_btrace_cpu.vendor)
3061 {
3062 case CV_INTEL:
3063 if (record_btrace_cpu.stepping == 0)
3064 printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
3065 record_btrace_cpu.family,
3066 record_btrace_cpu.model);
3067 else
3068 printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
3069 record_btrace_cpu.family,
3070 record_btrace_cpu.model,
3071 record_btrace_cpu.stepping);
3072 return;
3073 }
3074 }
3075
3076 error (_("Internal error: bad cpu state."));
3077}
3078
b20a6524
MM
/* Show-value callback for "show record btrace bts buffer-size".  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  const char *fmt = _("The record/replay bts buffer size is %s.\n");

  fprintf_filtered (file, fmt, value);
}
3089
/* Show-value callback for "show record btrace pt buffer-size".  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  const char *fmt = _("The record/replay pt buffer size is %s.\n");

  fprintf_filtered (file, fmt, value);
}
3100
/* Initialize btrace commands.  Registration order is user-visible in
   command listings, so it is preserved as-is.  */

void _initialize_record_btrace ();
void
_initialize_record_btrace ()
{
  /* The "record btrace" start commands and their per-format variants.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  These must be registered
     before the options that hang off them below.  */
  add_basic_prefix_cmd ("btrace", class_support,
			_("Set record options."), &set_record_btrace_cmdlist,
			"set record btrace ", 0, &set_record_cmdlist);

  add_show_prefix_cmd ("btrace", class_support,
		       _("Show record options."), &show_record_btrace_cmdlist,
		       "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);

  /* "set record btrace cpu" and its "auto"/"none" sub-commands; the
     trailing 1 allows an unrecognized sub-command to reach
     cmd_set_record_btrace_cpu itself for VENDOR:IDENTIFIER parsing.  */
  add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
		  _("\
Set the cpu to be used for trace decode.\n\n\
The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
When decoding branch trace, enable errata workarounds for the specified cpu.\n\
The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
When GDB does not support that cpu, this option can be used to enable\n\
workarounds for a similar cpu that GDB supports.\n\n\
When set to \"none\", errata workarounds are disabled."),
		  &set_record_btrace_cpu_cmdlist,
		  "set record btrace cpu ", 1,
		  &set_record_btrace_cmdlist);

  add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
Automatically determine the cpu to be used for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
Do not enable errata workarounds for trace decode."),
	   &set_record_btrace_cpu_cmdlist);

  add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
Show the cpu to be used for trace decode."),
	   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" options.  */
  add_basic_prefix_cmd ("bts", class_support,
			_("Set record btrace bts options."),
			&set_record_btrace_bts_cmdlist,
			"set record btrace bts ", 0,
			&set_record_btrace_cmdlist);

  add_show_prefix_cmd ("bts", class_support,
		       _("Show record btrace bts options."),
		       &show_record_btrace_bts_cmdlist,
		       "show record btrace bts ", 0,
		       &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" options.  */
  add_basic_prefix_cmd ("pt", class_support,
			_("Set record btrace pt options."),
			&set_record_btrace_pt_cmdlist,
			"set record btrace pt ", 0,
			&set_record_btrace_cmdlist);

  add_show_prefix_cmd ("pt", class_support,
		       _("Show record btrace pt options."),
		       &show_record_btrace_pt_cmdlist,
		       "show record btrace pt ", 0,
		       &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Register the target itself.  */
  add_target (record_btrace_target_info, record_btrace_target_open);

  /* Cache for btrace frame unwinding (see bfcache_hash/bfcache_eq).  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default buffer sizes requested when recording starts.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}