]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
gdb, btrace: diagnose double and failed enable
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
b811d2c2 3 Copyright (C) 2013-2020 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
76727919 29#include "observable.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
400b5eca 39#include "gdbsupport/event-loop.h"
70ad5bff 40#include "inf-loop.h"
00431a78 41#include "inferior.h"
325fac50 42#include <algorithm>
0d12e84c 43#include "gdbarch.h"
e43b10e1 44#include "cli/cli-style.h"
93b54c8e 45#include "async-event.h"
afedecd3 46
d9f719f1
PA
/* Metadata for the "record-btrace" target: its name plus the short and
   long descriptions shown by "help target".  */
static const target_info record_btrace_target_info = {
  "record-btrace",
  N_("Branch tracing target"),
  N_("Collect control-flow trace and provide the execution history.")
};
52
afedecd3 53/* The target_ops of record-btrace. */
f6ac5f3d
PA
54
/* The record-btrace target.  It sits at the record stratum and provides
   the execution history collected via branch tracing (BTS or Intel PT).
   Most methods are implemented out of line; the trivial ones forward to
   the generic record_* helpers inline.  */

class record_btrace_target final : public target_ops
{
public:
  const target_info &info () const override
  { return record_btrace_target_info; }

  strata stratum () const override { return record_stratum; }

  void close () override;
  void async (int) override;

  void detach (inferior *inf, int from_tty) override
  { record_detach (this, inf, from_tty); }

  void disconnect (const char *, int) override;

  void mourn_inferior () override
  { record_mourn_inferior (this); }

  void kill () override
  { record_kill (this); }

  enum record_method record_method (ptid_t ptid) override;

  void stop_recording () override;
  void info_record () override;

  /* Browsing the recorded instruction and function-call history.  */
  void insn_history (int size, gdb_disassembly_flags flags) override;
  void insn_history_from (ULONGEST from, int size,
			  gdb_disassembly_flags flags) override;
  void insn_history_range (ULONGEST begin, ULONGEST end,
			   gdb_disassembly_flags flags) override;
  void call_history (int size, record_print_flags flags) override;
  void call_history_from (ULONGEST begin, int size, record_print_flags flags)
    override;
  void call_history_range (ULONGEST begin, ULONGEST end, record_print_flags flags)
    override;

  /* Replay support.  */
  bool record_is_replaying (ptid_t ptid) override;
  bool record_will_replay (ptid_t ptid, int dir) override;
  void record_stop_replaying () override;

  enum target_xfer_status xfer_partial (enum target_object object,
					const char *annex,
					gdb_byte *readbuf,
					const gdb_byte *writebuf,
					ULONGEST offset, ULONGEST len,
					ULONGEST *xfered_len) override;

  int insert_breakpoint (struct gdbarch *,
			 struct bp_target_info *) override;
  int remove_breakpoint (struct gdbarch *, struct bp_target_info *,
			 enum remove_bp_reason) override;

  void fetch_registers (struct regcache *, int) override;

  void store_registers (struct regcache *, int) override;
  void prepare_to_store (struct regcache *) override;

  const struct frame_unwind *get_unwinder () override;

  const struct frame_unwind *get_tailcall_unwinder () override;

  void commit_resume () override;
  void resume (ptid_t, int, enum gdb_signal) override;
  ptid_t wait (ptid_t, struct target_waitstatus *, int) override;

  void stop (ptid_t) override;
  void update_thread_list () override;
  bool thread_alive (ptid_t ptid) override;

  /* Navigation within the recorded history.  */
  void goto_record_begin () override;
  void goto_record_end () override;
  void goto_record (ULONGEST insn) override;

  bool can_execute_reverse () override;

  bool stopped_by_sw_breakpoint () override;
  bool supports_stopped_by_sw_breakpoint () override;

  bool stopped_by_hw_breakpoint () override;
  bool supports_stopped_by_hw_breakpoint () override;

  enum exec_direction_kind execution_direction () override;
  void prepare_to_generate_core () override;
  void done_generating_core () override;
};
141
/* The single instance of the record-btrace target pushed onto the target
   stack by record_btrace_push_target.  */
static record_btrace_target record_btrace_ops;

/* Initialize the record-btrace target ops. */

/* Token associated with a new-thread observer enabling branch tracing
   for the new thread.  */
static const gdb::observers::token record_btrace_thread_observer_token {};

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* The cpu state kinds for "set record btrace cpu".  */
enum record_btrace_cpu_state_kind
{
  CS_AUTO,
  CS_NONE,
  CS_CPU
};

/* The current cpu state.  */
static enum record_btrace_cpu_state_kind record_btrace_cpu_state = CS_AUTO;

/* The current cpu for trace decode.  Only meaningful when
   record_btrace_cpu_state is CS_NONE or CS_CPU.  */
static struct btrace_cpu record_btrace_cpu;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Command list for "set record btrace cpu".  */
static struct cmd_list_element *set_record_btrace_cpu_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
218
219
4a4495d6
MM
220/* Return the cpu configured by the user. Returns NULL if the cpu was
221 configured as auto. */
222const struct btrace_cpu *
223record_btrace_get_cpu (void)
224{
225 switch (record_btrace_cpu_state)
226 {
227 case CS_AUTO:
228 return nullptr;
229
230 case CS_NONE:
231 record_btrace_cpu.vendor = CV_UNKNOWN;
232 /* Fall through. */
233 case CS_CPU:
234 return &record_btrace_cpu;
235 }
236
237 error (_("Internal error: bad record btrace cpu state."));
238}
239
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  DEBUG ("require");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  thread_info *tp = inferior_thread ();

  /* Fetching the trace needs register access; make sure it is currently
     permitted before going any further.  */
  validate_registers_access ();

  btrace_fetch (tp, record_btrace_get_cpu ());

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
265
266/* Update the branch trace for the current thread and return a pointer to its
267 branch trace information struct.
268
269 Throws an error if there is no thread or no trace. This function never
270 returns NULL. */
271
272static struct btrace_thread_info *
273require_btrace (void)
274{
275 struct thread_info *tp;
276
277 tp = require_btrace_thread ();
278
279 return &tp->btrace;
afedecd3
MM
280}
281
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer: a tracing failure must not abort thread
   creation, so errors are downgraded to warnings.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  try
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  catch (const gdb_exception_error &error)
    {
      warning ("%s", error.what ());
    }
}
296
afedecd3
MM
/* Enable automatic tracing of new threads by attaching a new-thread
   observer keyed on record_btrace_thread_observer_token.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  gdb::observers::new_thread.attach (record_btrace_enable_warn,
				     record_btrace_thread_observer_token);
}
307
/* Disable automatic tracing of new threads.  Detaching by token is a
   no-op if the observer was never attached.  */

static void
record_btrace_auto_disable (void)
{
  DEBUG ("detach thread observer");

  gdb::observers::new_thread.detach (record_btrace_thread_observer_token);
}
317
70ad5bff
MM
/* The record-btrace async event handler function.  Forwards to the
   generic inferior event handler; DATA is unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
325
c0272db5
TW
/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  /* Tell observers that recording started with method "btrace".  */
  format = btrace_format_short_string (record_btrace_conf.format);
  gdb::observers::record_changed.notify (current_inferior (), 1, "btrace", format);
}
345
228f1508
SM
/* Disable btrace on a set of threads on scope exit.

   Used to roll back partially-enabled tracing: register each thread as
   it is enabled; call discard once everything succeeded to keep tracing
   on.  */

struct scoped_btrace_disable
{
  scoped_btrace_disable () = default;

  DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);

  /* Disable tracing for every still-registered thread.  */
  ~scoped_btrace_disable ()
  {
    for (thread_info *tp : m_threads)
      btrace_disable (tp);
  }

  /* Register THREAD for disabling on destruction.  */
  void add_thread (thread_info *thread)
  {
    m_threads.push_front (thread);
  }

  /* Forget all registered threads; tracing stays enabled for them.  */
  void discard ()
  {
    m_threads.clear ();
  }

private:
  std::forward_list<thread_info *> m_threads;
};
373
/* Open target record-btrace.  ARGS optionally selects threads by
   global number; an empty ARGS enables tracing for all non-exited
   threads.  */

static void
record_btrace_target_open (const char *args, int from_tty)
{
  /* If we fail to enable btrace for one thread, disable it for the threads for
     which it was successfully enabled.  */
  scoped_btrace_disable btrace_disable;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  for (thread_info *tp : all_non_exited_threads ())
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	btrace_disable.add_thread (tp);
      }

  record_btrace_push_target ();

  /* Everything succeeded; keep tracing enabled.  */
  btrace_disable.discard ();
}
402
/* The stop_recording method of target record-btrace.  Stops tracing for
   all threads but keeps the already-recorded history available.  */

void
record_btrace_target::stop_recording ()
{
  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  for (thread_info *tp : all_non_exited_threads ())
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
416
/* The disconnect method of target record-btrace.  */

void
record_btrace_target::disconnect (const char *args,
				  int from_tty)
{
  /* Grab the target beneath before unpushing removes us from the stack.  */
  struct target_ops *beneath = this->beneath ();

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (this);

  /* Forward disconnect.  */
  beneath->disconnect (args, from_tty);
}
431
/* The close method of target record-btrace.  */

void
record_btrace_target::close ()
{
  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  for (thread_info *tp : all_non_exited_threads ())
    btrace_teardown (tp);
}
449
/* The async method of target record-btrace.  Toggles our async event
   handler and forwards the request to the target beneath.  */

void
record_btrace_target::async (int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  this->beneath ()->async (enable);
}
462
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1 GB, 1 MB, or 1 kB (checked in that
   order), scale it down to that unit and return the matching suffix;
   otherwise leave *SIZE untouched and return "".  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  const unsigned int value = *size;

  if ((value & ((1u << 30) - 1)) == 0)
    {
      *size = value >> 30;
      return "GB";
    }

  if ((value & ((1u << 20) - 1)) == 0)
    {
      *size = value >> 20;
      return "MB";
    }

  if ((value & ((1u << 10) - 1)) == 0)
    {
      *size = value >> 10;
      return "kB";
    }

  return "";
}
490
491/* Print a BTS configuration. */
492
493static void
494record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
495{
496 const char *suffix;
497 unsigned int size;
498
499 size = conf->size;
500 if (size > 0)
501 {
502 suffix = record_btrace_adjust_size (&size);
503 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
504 }
505}
506
bc504a31 507/* Print an Intel Processor Trace configuration. */
b20a6524
MM
508
509static void
510record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
511{
512 const char *suffix;
513 unsigned int size;
514
515 size = conf->size;
516 if (size > 0)
517 {
518 suffix = record_btrace_adjust_size (&size);
519 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
520 }
521}
522
d33501a5
MM
/* Print a branch tracing configuration: the format and its
   format-specific parameters.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  /* Reaching here means a format value not covered above.  */
  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}
547
/* The info_record method of target record-btrace.  Prints the recording
   configuration and counts of recorded instructions, functions and
   gaps for the current thread.  */

void
record_btrace_target::info_record ()
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  if (inferior_ptid == null_ptid)
    error (_("No thread."));

  tp = inferior_thread ();

  validate_registers_access ();

  btinfo = &tp->btrace;

  /* "::" disambiguates the free function from this class's methods.  */
  conf = ::btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp, record_btrace_get_cpu ());

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* Count functions from the last entry in the call history.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp),
		     target_pid_to_str (tp->ptid).c_str ());

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
608
31fd9caa
MM
/* Print a decode error.  The output looks like "[decode error (N): msg]"
   except for PT notifications (positive ERRCODE), which print only the
   message.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_signed ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}
628
f94cc897
MM
/* A range of source lines.  An empty range is represented by
   end <= begin.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
642
643/* Construct a line range. */
644
645static struct btrace_line_range
646btrace_mk_line_range (struct symtab *symtab, int begin, int end)
647{
648 struct btrace_line_range range;
649
650 range.symtab = symtab;
651 range.begin = begin;
652 range.end = end;
653
654 return range;
655}
656
657/* Add a line to a line range. */
658
659static struct btrace_line_range
660btrace_line_range_add (struct btrace_line_range range, int line)
661{
662 if (range.end <= range.begin)
663 {
664 /* This is the first entry. */
665 range.begin = line;
666 range.end = line + 1;
667 }
668 else if (line < range.begin)
669 range.begin = line;
670 else if (range.end < line)
671 range.end = line;
672
673 return range;
674}
675
676/* Return non-zero if RANGE is empty, zero otherwise. */
677
678static int
679btrace_line_range_is_empty (struct btrace_line_range range)
680{
681 return range.end <= range.begin;
682}
683
684/* Return non-zero if LHS contains RHS, zero otherwise. */
685
686static int
687btrace_line_range_contains_range (struct btrace_line_range lhs,
688 struct btrace_line_range rhs)
689{
690 return ((lhs.symtab == rhs.symtab)
691 && (lhs.begin <= rhs.begin)
692 && (rhs.end <= lhs.end));
693}
694
/* Find the line range associated with PC: all line-table entries for
   PC's symtab whose address equals PC exactly.  Returns an empty range
   if PC has no symtab or line table.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      /* The test of is_stmt here was added when the is_stmt field was
	 introduced to the 'struct linetable_entry' structure.  This
	 ensured that this loop maintained the same behaviour as before we
	 introduced is_stmt.  That said, it might be that we would be
	 better off not checking is_stmt here, this would lead to us
	 possibly adding more line numbers to the range.  At the time this
	 change was made I was unsure how to test this so chose to go with
	 maintaining the existing experience.  */
      if ((lines[i].pc == pc) && (lines[i].line != 0)
	  && (lines[i].is_stmt == 1))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
737
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    gdb::optional<ui_out_emit_tuple> *src_and_asm_tuple,
		    gdb::optional<ui_out_emit_list> *asm_list,
		    gdb_disassembly_flags flags)
{
  print_source_lines_flags psl_flags;

  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (int line = lines.begin; line < lines.end; ++line)
    {
      /* Close the previous tuple's instruction list (if any) before
	 starting a tuple for this source line.  */
      asm_list->reset ();

      src_and_asm_tuple->emplace (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Left open so the caller can add the line's instructions.  */
      asm_list->emplace (uiout, "line_asm_insn");
    }
}
769
afedecd3
MM
/* Disassemble a section of the recorded instruction trace from BEGIN
   (inclusive) to END (exclusive), interleaving source lines when
   requested via FLAGS and printing decode errors for gaps.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  struct gdbarch *gdbarch = target_gdbarch ();
  btrace_line_range last_lines = btrace_mk_line_range (NULL, 0, 0);

  ui_out_emit_list list_emitter (uiout, "asm_insns");

  gdb::optional<ui_out_emit_tuple> src_and_asm_tuple;
  gdb::optional<ui_out_emit_list> asm_list;

  gdb_pretty_print_disassembler disasm (gdbarch, uiout);

  for (btrace_insn_iterator it = *begin; btrace_insn_cmp (&it, end) != 0;
       btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print source lines only when entering a line range we
		 have not already printed.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &src_and_asm_tuple, &asm_list,
				      flags);
		  last_lines = lines;
		}
	      else if (!src_and_asm_tuple.has_value ())
		{
		  gdb_assert (!asm_list.has_value ());

		  src_and_asm_tuple.emplace (uiout, "src_and_asm_line");

		  /* No source information.  */
		  asm_list.emplace (uiout, "line_asm_insn");
		}

	      gdb_assert (src_and_asm_tuple.has_value ());
	      gdb_assert (asm_list.has_value ());
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (&dinsn, flags);
	}
    }
}
859
/* The insn_history method of target record-btrace.  Shows SIZE
   instructions continuing the previous listing, or centered around the
   current position on the first request.  Negative SIZE means going
   backwards.  */

void
record_btrace_target::insn_history (int size, gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previous listing.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}
942
/* The insn_history_range method of target record-btrace.  Shows the
   instructions numbered FROM through TO, both inclusive.  */

void
record_btrace_target::insn_history_range (ULONGEST from, ULONGEST to,
					  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int; a
     ULONGEST argument that does not round-trip is out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}
990
/* The insn_history_from method of target record-btrace.  Shows SIZE
   instructions ending at (SIZE < 0) or starting at (SIZE > 0)
   instruction number FROM, clamping at the history boundaries.  */

void
record_btrace_target::insn_history_from (ULONGEST from, int size,
					 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at zero rather than underflowing.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  insn_history_range (begin, end, flags);
}
1024
/* Print the instruction number range for a function call history line,
   as "begin,end" with both numbers inclusive.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  uiout->field_unsigned ("insn begin", begin);
  uiout->text (",");
  uiout->field_unsigned ("insn end", end);
}
1043
ce0dfbea
MM
1044/* Compute the lowest and highest source line for the instructions in BFUN
1045 and return them in PBEGIN and PEND.
1046 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
1047 result from inlining or macro expansion. */
1048
1049static void
1050btrace_compute_src_line_range (const struct btrace_function *bfun,
1051 int *pbegin, int *pend)
1052{
ce0dfbea
MM
1053 struct symtab *symtab;
1054 struct symbol *sym;
ce0dfbea
MM
1055 int begin, end;
1056
1057 begin = INT_MAX;
1058 end = INT_MIN;
1059
1060 sym = bfun->sym;
1061 if (sym == NULL)
1062 goto out;
1063
1064 symtab = symbol_symtab (sym);
1065
0860c437 1066 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
1067 {
1068 struct symtab_and_line sal;
1069
0860c437 1070 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
1071 if (sal.symtab != symtab || sal.line == 0)
1072 continue;
1073
325fac50
PA
1074 begin = std::min (begin, sal.line);
1075 end = std::max (end, sal.line);
ce0dfbea
MM
1076 }
1077
1078 out:
1079 *pbegin = begin;
1080 *pend = end;
1081}
1082
afedecd3
MM
1083/* Print the source line information for a function call history line. */
1084
1085static void
23a7fe75
MM
1086btrace_call_history_src_line (struct ui_out *uiout,
1087 const struct btrace_function *bfun)
afedecd3
MM
1088{
1089 struct symbol *sym;
23a7fe75 1090 int begin, end;
afedecd3
MM
1091
1092 sym = bfun->sym;
1093 if (sym == NULL)
1094 return;
1095
112e8700 1096 uiout->field_string ("file",
cbe56571 1097 symtab_to_filename_for_display (symbol_symtab (sym)),
e43b10e1 1098 file_name_style.style ());
afedecd3 1099
ce0dfbea 1100 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 1101 if (end < begin)
afedecd3
MM
1102 return;
1103
112e8700 1104 uiout->text (":");
381befee 1105 uiout->field_signed ("min line", begin);
afedecd3 1106
23a7fe75 1107 if (end == begin)
afedecd3
MM
1108 return;
1109
112e8700 1110 uiout->text (",");
381befee 1111 uiout->field_signed ("max line", end);
afedecd3
MM
1112}
1113
0b722aec
MM
1114/* Get the name of a branch trace function. */
1115
1116static const char *
1117btrace_get_bfun_name (const struct btrace_function *bfun)
1118{
1119 struct minimal_symbol *msym;
1120 struct symbol *sym;
1121
1122 if (bfun == NULL)
1123 return "??";
1124
1125 msym = bfun->msym;
1126 sym = bfun->sym;
1127
1128 if (sym != NULL)
987012b8 1129 return sym->print_name ();
0b722aec 1130 else if (msym != NULL)
c9d95fa3 1131 return msym->print_name ();
0b722aec
MM
1132 else
1133 return "??";
1134}
1135
/* Print a section of the recorded function-call trace between the call
   iterators BEGIN (inclusive) and END (exclusive) to UIOUT.  INT_FLAGS is a
   mask of enum record_print_flag controlling indentation, instruction
   ranges, and source-line output.  Trace gaps are rendered as decoded
   errors instead of function lines.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      uiout->field_unsigned ("index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  A non-zero errcode marks a decode
	 error segment rather than a real function.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth; BTINFO->level normalizes the minimal
	 level to zero.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      /* Prefer the debug symbol name; fall back to the minimal symbol.
	 In MI mode, omit the "??" placeholder entirely.  */
      if (sym != NULL)
	uiout->field_string ("function", sym->print_name (),
			     function_name_style.style ());
      else if (msym != NULL)
	uiout->field_string ("function", msym->print_name (),
			     function_name_style.style ());
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??",
			     function_name_style.style ());

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
1213
/* The call_history method of target record-btrace.  Print SIZE function
   calls around the current browsing position (or the replay/trace end if
   there is no previous history).  Negative SIZE moves backwards.  */

void
record_btrace_target::call_history (int size, record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this prints
     the *call* history ("func history" elsewhere) — looks like a
     copy-paste slip, but MI consumers may rely on it; confirm before
     changing.  */
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", (int) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* Continue browsing from where the previous command stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left in the requested direction.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the new browsing position for the next command.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
1299
/* The call_history_range method of target record-btrace.  Print the call
   history for call numbers [FROM, TO], both inclusive.  An out-of-bounds
   upper bound is silently truncated to the end of the trace.  */

void
record_btrace_target::call_history_range (ULONGEST from, ULONGEST to,
					  record_print_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  /* Call numbers are unsigned int internally; narrowing is checked
     below.  */
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);

  /* Check for wrap-arounds (values that did not survive the narrowing
     conversion above).  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}
1347
f6ac5f3d 1348/* The call_history_from method of target record-btrace. */
afedecd3 1349
f6ac5f3d
PA
1350void
1351record_btrace_target::call_history_from (ULONGEST from, int size,
1352 record_print_flags flags)
afedecd3
MM
1353{
1354 ULONGEST begin, end, context;
1355
1356 context = abs (size);
0688d04e
MM
1357 if (context == 0)
1358 error (_("Bad record function-call-history-size."));
afedecd3
MM
1359
1360 if (size < 0)
1361 {
1362 end = from;
1363
1364 if (from < context)
1365 begin = 0;
1366 else
0688d04e 1367 begin = from - context + 1;
afedecd3
MM
1368 }
1369 else
1370 {
1371 begin = from;
0688d04e 1372 end = from + context - 1;
afedecd3
MM
1373
1374 /* Check for wrap-around. */
1375 if (end < begin)
1376 end = ULONGEST_MAX;
1377 }
1378
f6ac5f3d 1379 call_history_range ( begin, end, flags);
afedecd3
MM
1380}
1381
f6ac5f3d 1382/* The record_method method of target record-btrace. */
b158a20f 1383
f6ac5f3d
PA
1384enum record_method
1385record_btrace_target::record_method (ptid_t ptid)
b158a20f 1386{
5b6d1e4f
PA
1387 process_stratum_target *proc_target = current_inferior ()->process_target ();
1388 thread_info *const tp = find_thread_ptid (proc_target, ptid);
b158a20f
TW
1389
1390 if (tp == NULL)
1391 error (_("No thread."));
1392
1393 if (tp->btrace.target == NULL)
1394 return RECORD_METHOD_NONE;
1395
1396 return RECORD_METHOD_BTRACE;
1397}
1398
f6ac5f3d 1399/* The record_is_replaying method of target record-btrace. */
07bbe694 1400
57810aa7 1401bool
f6ac5f3d 1402record_btrace_target::record_is_replaying (ptid_t ptid)
07bbe694 1403{
5b6d1e4f
PA
1404 process_stratum_target *proc_target = current_inferior ()->process_target ();
1405 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331 1406 if (btrace_is_replaying (tp))
57810aa7 1407 return true;
07bbe694 1408
57810aa7 1409 return false;
07bbe694
MM
1410}
1411
f6ac5f3d 1412/* The record_will_replay method of target record-btrace. */
7ff27e9b 1413
57810aa7 1414bool
f6ac5f3d 1415record_btrace_target::record_will_replay (ptid_t ptid, int dir)
7ff27e9b 1416{
f6ac5f3d 1417 return dir == EXEC_REVERSE || record_is_replaying (ptid);
7ff27e9b
MM
1418}
1419
/* The xfer_partial method of target record-btrace.  While replaying with
   read-only memory access, memory writes are refused and reads are limited
   to read-only sections; everything else is forwarded to the target
   beneath.  */

enum target_xfer_status
record_btrace_target::xfer_partial (enum target_object object,
				    const char *annex, gdb_byte *readbuf,
				    const gdb_byte *writebuf, ULONGEST offset,
				    ULONGEST len, ULONGEST *xfered_len)
{
  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_is_replaying (inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (this, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_section_flags (section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section and
		       fall through to the forwarding below.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    /* Writable memory is unavailable during replay.  */
	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  return this->beneath ()->xfer_partial (object, annex, readbuf, writebuf,
					 offset, len, xfered_len);
}
1470
/* The insert_breakpoint method of target record-btrace.  Temporarily lifts
   the replay memory-access restriction so the breakpoint can be written,
   restoring the previous mode even when the target beneath throws.  */

int
record_btrace_target::insert_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->insert_breakpoint (gdbarch, bp_tgt);
    }
  catch (const gdb_exception &except)
    {
      /* Restore the access mode before propagating the exception.  */
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}
1499
/* The remove_breakpoint method of target record-btrace.  Mirror image of
   insert_breakpoint: temporarily allows memory writes during replay and
   restores the previous access mode on all paths.  */

int
record_btrace_target::remove_breakpoint (struct gdbarch *gdbarch,
					 struct bp_target_info *bp_tgt,
					 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  try
    {
      ret = this->beneath ()->remove_breakpoint (gdbarch, bp_tgt, reason);
    }
  catch (const gdb_exception &except)
    {
      /* Restore the access mode before propagating the exception.  */
      replay_memory_access = old;
      throw;
    }
  replay_memory_access = old;

  return ret;
}
1529
/* The fetch_registers method of target record-btrace.  While replaying,
   only the PC register is available; it is supplied from the current
   replay instruction.  Otherwise the request is forwarded.  */

void
record_btrace_target::fetch_registers (struct regcache *regcache, int regno)
{
  btrace_insn_iterator *replay = nullptr;

  /* Thread-db may ask for a thread's registers before GDB knows about the
     thread.  We forward the request to the target beneath in this
     case.  */
  thread_info *tp = find_thread_ptid (regcache->target (), regcache->ptid ());
  if (tp != nullptr)
    replay = tp->btrace.replay;

  if (replay != nullptr && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = regcache->arch ();
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache->raw_supply (regno, &insn->pc);
    }
  else
    this->beneath ()->fetch_registers (regcache, regno);
}
1567
f6ac5f3d 1568/* The store_registers method of target record-btrace. */
1f3ef581 1569
f6ac5f3d
PA
1570void
1571record_btrace_target::store_registers (struct regcache *regcache, int regno)
1f3ef581 1572{
a52eab48 1573 if (!record_btrace_generating_corefile
222312d3 1574 && record_is_replaying (regcache->ptid ()))
4d10e986 1575 error (_("Cannot write registers while replaying."));
1f3ef581 1576
491144b5 1577 gdb_assert (may_write_registers);
1f3ef581 1578
b6a8c27b 1579 this->beneath ()->store_registers (regcache, regno);
1f3ef581
MM
1580}
1581
f6ac5f3d 1582/* The prepare_to_store method of target record-btrace. */
1f3ef581 1583
f6ac5f3d
PA
1584void
1585record_btrace_target::prepare_to_store (struct regcache *regcache)
1f3ef581 1586{
a52eab48 1587 if (!record_btrace_generating_corefile
222312d3 1588 && record_is_replaying (regcache->ptid ()))
1f3ef581
MM
1589 return;
1590
b6a8c27b 1591 this->beneath ()->prepare_to_store (regcache);
1f3ef581
MM
1592}
1593
/* The branch trace frame cache.  Associates a frame_info with the traced
   function segment it represents; used by the btrace frame unwinders.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1607
1608/* A struct btrace_frame_cache hash table indexed by NEXT. */
1609
1610static htab_t bfcache;
1611
1612/* hash_f for htab_create_alloc of bfcache. */
1613
1614static hashval_t
1615bfcache_hash (const void *arg)
1616{
19ba03f4
SM
1617 const struct btrace_frame_cache *cache
1618 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1619
1620 return htab_hash_pointer (cache->frame);
1621}
1622
1623/* eq_f for htab_create_alloc of bfcache. */
1624
1625static int
1626bfcache_eq (const void *arg1, const void *arg2)
1627{
19ba03f4
SM
1628 const struct btrace_frame_cache *cache1
1629 = (const struct btrace_frame_cache *) arg1;
1630 const struct btrace_frame_cache *cache2
1631 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1632
1633 return cache1->frame == cache2->frame;
1634}
1635
1636/* Create a new btrace frame cache. */
1637
1638static struct btrace_frame_cache *
1639bfcache_new (struct frame_info *frame)
1640{
1641 struct btrace_frame_cache *cache;
1642 void **slot;
1643
1644 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1645 cache->frame = frame;
1646
1647 slot = htab_find_slot (bfcache, cache, INSERT);
1648 gdb_assert (*slot == NULL);
1649 *slot = cache;
1650
1651 return cache;
1652}
1653
1654/* Extract the branch trace function from a branch trace frame. */
1655
1656static const struct btrace_function *
1657btrace_get_frame_function (struct frame_info *frame)
1658{
1659 const struct btrace_frame_cache *cache;
0b722aec
MM
1660 struct btrace_frame_cache pattern;
1661 void **slot;
1662
1663 pattern.frame = frame;
1664
1665 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1666 if (slot == NULL)
1667 return NULL;
1668
19ba03f4 1669 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1670 return cache->bfun;
1671}
1672
cecac1ab
MM
1673/* Implement stop_reason method for record_btrace_frame_unwind. */
1674
1675static enum unwind_stop_reason
1676record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1677 void **this_cache)
1678{
0b722aec
MM
1679 const struct btrace_frame_cache *cache;
1680 const struct btrace_function *bfun;
1681
19ba03f4 1682 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1683 bfun = cache->bfun;
1684 gdb_assert (bfun != NULL);
1685
42bfe59e 1686 if (bfun->up == 0)
0b722aec
MM
1687 return UNWIND_UNAVAILABLE;
1688
1689 return UNWIND_NO_REASON;
cecac1ab
MM
1690}
1691
/* Implement this_id method for record_btrace_frame_unwind.  The id uses
   the frame's function address as code address and the number of the
   function's first trace segment as the special address; the stack is
   marked unavailable.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so all segments of
     the same function invocation share one frame id.  */
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1721
/* Implement prev_register method for record_btrace_frame_unwind.  Only the
   PC can be unwound: it is reconstructed from the caller's trace segment.
   All other registers are reported unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    /* The up link goes to the return site: the caller resumes at its
       segment's first instruction.  */
    pc = caller->insn.front ().pc;
  else
    {
      /* Otherwise the caller resumes after its last executed instruction
	 (the call site).  */
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1766
/* Implement sniffer method for record_btrace_frame_unwind.  Claims the
   frame when replaying: the innermost frame maps to the current replay
   position, outer frames to the caller of the next frame's segment.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = inferior_thread ();

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: take the segment at the replay position.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: follow the up link of the next frame's segment.
	 Tail calls are handled by the tailcall sniffer instead.  */
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1821
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.
   Claims only frames whose callee segment was entered via a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  /* A tail-call frame is never the innermost frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = inferior_thread ();
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1863
1864static void
1865record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1866{
1867 struct btrace_frame_cache *cache;
1868 void **slot;
1869
19ba03f4 1870 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1871
1872 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1873 gdb_assert (slot != NULL);
1874
1875 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1876}
1877
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Same handlers as above, but sniffing tail-call frames.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1905
f6ac5f3d 1906/* Implement the get_unwinder method. */
ac01945b 1907
f6ac5f3d
PA
1908const struct frame_unwind *
1909record_btrace_target::get_unwinder ()
ac01945b
TT
1910{
1911 return &record_btrace_frame_unwind;
1912}
1913
f6ac5f3d 1914/* Implement the get_tailcall_unwinder method. */
ac01945b 1915
f6ac5f3d
PA
1916const struct frame_unwind *
1917record_btrace_target::get_tailcall_unwinder ()
ac01945b
TT
1918{
1919 return &record_btrace_tailcall_frame_unwind;
1920}
1921
987e68b1
MM
1922/* Return a human-readable string for FLAG. */
1923
1924static const char *
1925btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1926{
1927 switch (flag)
1928 {
1929 case BTHR_STEP:
1930 return "step";
1931
1932 case BTHR_RSTEP:
1933 return "reverse-step";
1934
1935 case BTHR_CONT:
1936 return "cont";
1937
1938 case BTHR_RCONT:
1939 return "reverse-cont";
1940
1941 case BTHR_STOP:
1942 return "stop";
1943 }
1944
1945 return "<invalid>";
1946}
1947
/* Indicate that TP should be resumed according to FLAG.  The request is
   only recorded in the thread's btrace flags; the actual stepping happens
   later in the target's wait method.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flag,
	 btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp, record_btrace_get_cpu ());

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1969
/* Get the current frame id for TP.  Temporarily switches to TP and clears
   its executing flag so the frame can be computed; both are restored on
   all paths, including when frame computation throws.  */

static struct frame_id
get_thread_current_frame_id (struct thread_info *tp)
{
  struct frame_id id;
  bool executing;

  /* Set current thread, which is implicitly used by
     get_current_frame.  */
  scoped_restore_current_thread restore_thread;

  switch_to_thread (tp);

  process_stratum_target *proc_target = tp->inf->process_target ();

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = tp->executing;
  set_executing (proc_target, inferior_ptid, false);

  id = null_frame_id;
  try
    {
      id = get_frame_id (get_current_frame ());
    }
  catch (const gdb_exception &except)
    {
      /* Restore the previous execution state.  */
      set_executing (proc_target, inferior_ptid, executing);

      throw;
    }

  /* Restore the previous execution state.  */
  set_executing (proc_target, inferior_ptid, executing);

  return id;
}
2014
/* Start replaying a thread.  Positions the replay iterator at the end of
   the trace (skipping trailing gaps) and fixes up any stepping-related
   frame ids so step commands keep working under the btrace unwinder.
   Returns the new replay iterator, or NULL if there is no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detects steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  try
    {
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_thread (tp);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame_id = get_thread_current_frame_id (tp);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  catch (const gdb_exception &except)
    {
      /* Undo the partial replay setup before propagating the error.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_thread (tp);

      throw;
    }

  return replay;
}
2092
2093/* Stop replaying a thread. */
2094
2095static void
2096record_btrace_stop_replaying (struct thread_info *tp)
2097{
2098 struct btrace_thread_info *btinfo;
2099
2100 btinfo = &tp->btrace;
2101
2102 xfree (btinfo->replay);
2103 btinfo->replay = NULL;
2104
2105 /* Make sure we're not leaving any stale registers. */
00431a78 2106 registers_changed_thread (tp);
52834460
MM
2107}
2108
e3cfc1c7
MM
2109/* Stop replaying TP if it is at the end of its execution history. */
2110
2111static void
2112record_btrace_stop_replaying_at_end (struct thread_info *tp)
2113{
2114 struct btrace_insn_iterator *replay, end;
2115 struct btrace_thread_info *btinfo;
2116
2117 btinfo = &tp->btrace;
2118 replay = btinfo->replay;
2119
2120 if (replay == NULL)
2121 return;
2122
2123 btrace_insn_end (&end, btinfo);
2124
2125 if (btrace_insn_cmp (replay, &end) == 0)
2126 record_btrace_stop_replaying (tp);
2127}
2128
/* The resume method of target record-btrace.

   Records the resume intent (step/continue, forward/reverse) in each
   thread's btrace flags; the actual stepping happens in the wait method
   below.  */

void
record_btrace_target::resume (ptid_t ptid, int step, enum gdb_signal signal)
{
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid).c_str (),
	 ::execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = ::execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      this->beneath ()->resume (ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.
     FLAG applies to the thread being stepped; CFLAG to the others.  */
  if (::execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */

  process_stratum_target *proc_target = current_inferior ()->process_target ();

  if (!target_is_non_stop_p ())
    {
      gdb_assert (inferior_ptid.matches (ptid));

      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	{
	  if (tp->ptid.matches (inferior_ptid))
	    record_btrace_resume_thread (tp, flag);
	  else
	    record_btrace_resume_thread (tp, cflag);
	}
    }
  else
    {
      for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
	record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2202
f6ac5f3d 2203/* The commit_resume method of target record-btrace. */
85ad3aaf 2204
f6ac5f3d
PA
2205void
2206record_btrace_target::commit_resume ()
85ad3aaf 2207{
f6ac5f3d
PA
2208 if ((::execution_direction != EXEC_REVERSE)
2209 && !record_is_replaying (minus_one_ptid))
b6a8c27b 2210 beneath ()->commit_resume ();
85ad3aaf
PA
2211}
2212
987e68b1
MM
2213/* Cancel resuming TP. */
2214
2215static void
2216record_btrace_cancel_resume (struct thread_info *tp)
2217{
2218 enum btrace_thread_flag flags;
2219
2220 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2221 if (flags == 0)
2222 return;
2223
43792cf0
PA
2224 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2225 print_thread_id (tp),
a068643d 2226 target_pid_to_str (tp->ptid).c_str (), flags,
987e68b1
MM
2227 btrace_thread_flag_to_str (flags));
2228
2229 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2230 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2231}
2232
2233/* Return a target_waitstatus indicating that we ran out of history. */
2234
2235static struct target_waitstatus
2236btrace_step_no_history (void)
2237{
2238 struct target_waitstatus status;
2239
2240 status.kind = TARGET_WAITKIND_NO_HISTORY;
2241
2242 return status;
2243}
2244
2245/* Return a target_waitstatus indicating that a step finished. */
2246
2247static struct target_waitstatus
2248btrace_step_stopped (void)
2249{
2250 struct target_waitstatus status;
2251
2252 status.kind = TARGET_WAITKIND_STOPPED;
2253 status.value.sig = GDB_SIGNAL_TRAP;
2254
2255 return status;
2256}
2257
6e4879f0
MM
2258/* Return a target_waitstatus indicating that a thread was stopped as
2259 requested. */
2260
2261static struct target_waitstatus
2262btrace_step_stopped_on_request (void)
2263{
2264 struct target_waitstatus status;
2265
2266 status.kind = TARGET_WAITKIND_STOPPED;
2267 status.value.sig = GDB_SIGNAL_0;
2268
2269 return status;
2270}
2271
d825d248
MM
2272/* Return a target_waitstatus indicating a spurious stop. */
2273
2274static struct target_waitstatus
2275btrace_step_spurious (void)
2276{
2277 struct target_waitstatus status;
2278
2279 status.kind = TARGET_WAITKIND_SPURIOUS;
2280
2281 return status;
2282}
2283
e3cfc1c7
MM
2284/* Return a target_waitstatus indicating that the thread was not resumed. */
2285
2286static struct target_waitstatus
2287btrace_step_no_resumed (void)
2288{
2289 struct target_waitstatus status;
2290
2291 status.kind = TARGET_WAITKIND_NO_RESUMED;
2292
2293 return status;
2294}
2295
2296/* Return a target_waitstatus indicating that we should wait again. */
2297
2298static struct target_waitstatus
2299btrace_step_again (void)
2300{
2301 struct target_waitstatus status;
2302
2303 status.kind = TARGET_WAITKIND_IGNORE;
2304
2305 return status;
2306}
2307
52834460
MM
2308/* Clear the record histories. */
2309
2310static void
2311record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2312{
2313 xfree (btinfo->insn_history);
2314 xfree (btinfo->call_history);
2315
2316 btinfo->insn_history = NULL;
2317 btinfo->call_history = NULL;
2318}
2319
3c615f99
MM
2320/* Check whether TP's current replay position is at a breakpoint. */
2321
2322static int
2323record_btrace_replay_at_breakpoint (struct thread_info *tp)
2324{
2325 struct btrace_insn_iterator *replay;
2326 struct btrace_thread_info *btinfo;
2327 const struct btrace_insn *insn;
3c615f99
MM
2328
2329 btinfo = &tp->btrace;
2330 replay = btinfo->replay;
2331
2332 if (replay == NULL)
2333 return 0;
2334
2335 insn = btrace_insn_get (replay);
2336 if (insn == NULL)
2337 return 0;
2338
00431a78 2339 return record_check_stopped_by_breakpoint (tp->inf->aspace, insn->pc,
3c615f99
MM
2340 &btinfo->stop_reason);
2341}
2342
/* Step one instruction in forward direction.

   Returns SPURIOUS if the step succeeded, STOPPED if it hit a breakpoint,
   and NO_HISTORY if TP is not replaying or reached the end of the trace.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2391
/* Step one instruction in backward direction.

   Starts replaying if TP is not replaying yet.  Returns SPURIOUS on a
   successful step, STOPPED if the new position is at a breakpoint, and
   NO_HISTORY if the beginning of the trace was reached.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* NOTE(review): record_btrace_start_replaying returns NULL when there is
     no trace; callers appear to guard with btrace_is_empty beforehand (see
     record_btrace_step_thread) — confirm no other path reaches here with an
     empty trace.  */

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2438
/* Step a single thread.

   Consumes TP's pending move/stop request and performs one unit of it,
   translating the single-step result into the waitstatus reported to
   infrun.  For continue requests the flags are re-armed so the wait
   method keeps iterating (btrace_step_again).  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Fetch and clear the pending request atomically w.r.t. this function;
     a NO_HISTORY result re-arms it below.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid).c_str (), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* A successful single step is reported as a SIGTRAP stop.  */
      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep continuing: re-arm the request and ask wait to call again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2507
a6b5be76
MM
2508/* Announce further events if necessary. */
2509
2510static void
53127008
SM
2511record_btrace_maybe_mark_async_event
2512 (const std::vector<thread_info *> &moving,
2513 const std::vector<thread_info *> &no_history)
a6b5be76 2514{
53127008
SM
2515 bool more_moving = !moving.empty ();
2516 bool more_no_history = !no_history.empty ();;
a6b5be76
MM
2517
2518 if (!more_moving && !more_no_history)
2519 return;
2520
2521 if (more_moving)
2522 DEBUG ("movers pending");
2523
2524 if (more_no_history)
2525 DEBUG ("no-history pending");
2526
2527 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2528}
2529
/* The wait method of target record-btrace.

   While replaying, drives the recorded threads forward/backward one
   instruction at a time until one of them reports an event, and returns
   that thread's ptid with *STATUS filled in.  */

ptid_t
record_btrace_target::wait (ptid_t ptid, struct target_waitstatus *status,
			    int options)
{
  std::vector<thread_info *> moving;
  std::vector<thread_info *> no_history;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid).c_str (), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((::execution_direction != EXEC_REVERSE)
      && !record_is_replaying (minus_one_ptid))
    {
      return this->beneath ()->wait (ptid, status, options);
    }

  /* Keep a work list of moving threads.  */
  process_stratum_target *proc_target = current_inferior ()->process_target ();
  for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
    if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
      moving.push_back (tp);

  if (moving.empty ())
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid).c_str (),
	     target_waitstatus_to_string (status).c_str ());

      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  struct thread_info *eventing = NULL;
  while ((eventing == NULL) && !moving.empty ())
    {
      for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
	{
	  thread_info *tp = moving[ix];

	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread still moving; advance to the next one.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park the thread on the no-history list; reported last.  */
	      no_history.push_back (ordered_remove (moving, ix));
	      break;

	    default:
	      /* This thread has a reportable event.  */
	      eventing = unordered_remove (moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!no_history.empty ());

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = unordered_remove (no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    {
      for (thread_info *tp : all_non_exited_threads ())
	record_btrace_cancel_resume (tp);
    }

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_thread (eventing);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid).c_str (),
	 target_waitstatus_to_string (status).c_str ());

  return eventing->ptid;
}
2654
f6ac5f3d 2655/* The stop method of target record-btrace. */
6e4879f0 2656
f6ac5f3d
PA
2657void
2658record_btrace_target::stop (ptid_t ptid)
6e4879f0 2659{
a068643d 2660 DEBUG ("stop %s", target_pid_to_str (ptid).c_str ());
6e4879f0
MM
2661
2662 /* As long as we're not replaying, just forward the request. */
f6ac5f3d
PA
2663 if ((::execution_direction != EXEC_REVERSE)
2664 && !record_is_replaying (minus_one_ptid))
6e4879f0 2665 {
b6a8c27b 2666 this->beneath ()->stop (ptid);
6e4879f0
MM
2667 }
2668 else
2669 {
5b6d1e4f
PA
2670 process_stratum_target *proc_target
2671 = current_inferior ()->process_target ();
2672
2673 for (thread_info *tp : all_non_exited_threads (proc_target, ptid))
08036331
PA
2674 {
2675 tp->btrace.flags &= ~BTHR_MOVE;
2676 tp->btrace.flags |= BTHR_STOP;
2677 }
6e4879f0
MM
2678 }
2679 }
2680
f6ac5f3d 2681/* The can_execute_reverse method of target record-btrace. */
52834460 2682
57810aa7 2683bool
f6ac5f3d 2684record_btrace_target::can_execute_reverse ()
52834460 2685{
57810aa7 2686 return true;
52834460
MM
2687}
2688
f6ac5f3d 2689/* The stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2690
57810aa7 2691bool
f6ac5f3d 2692record_btrace_target::stopped_by_sw_breakpoint ()
52834460 2693{
f6ac5f3d 2694 if (record_is_replaying (minus_one_ptid))
9e8915c6
PA
2695 {
2696 struct thread_info *tp = inferior_thread ();
2697
2698 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2699 }
2700
b6a8c27b 2701 return this->beneath ()->stopped_by_sw_breakpoint ();
9e8915c6
PA
2702}
2703
f6ac5f3d 2704/* The supports_stopped_by_sw_breakpoint method of target
9e8915c6
PA
2705 record-btrace. */
2706
57810aa7 2707bool
f6ac5f3d 2708record_btrace_target::supports_stopped_by_sw_breakpoint ()
9e8915c6 2709{
f6ac5f3d 2710 if (record_is_replaying (minus_one_ptid))
57810aa7 2711 return true;
9e8915c6 2712
b6a8c27b 2713 return this->beneath ()->supports_stopped_by_sw_breakpoint ();
9e8915c6
PA
2714}
2715
/* The stopped_by_hw_breakpoint method of target record-btrace.
   (The previous comment said "stopped_by_sw_breakpoint" — copy/paste
   error; this is the hardware-breakpoint variant.)  */

bool
record_btrace_target::stopped_by_hw_breakpoint ()
{
  if (record_is_replaying (minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return this->beneath ()->stopped_by_hw_breakpoint ();
}
f6ac5f3d 2731/* The supports_stopped_by_hw_breakpoint method of target
9e8915c6
PA
2732 record-btrace. */
2733
57810aa7 2734bool
f6ac5f3d 2735record_btrace_target::supports_stopped_by_hw_breakpoint ()
9e8915c6 2736{
f6ac5f3d 2737 if (record_is_replaying (minus_one_ptid))
57810aa7 2738 return true;
52834460 2739
b6a8c27b 2740 return this->beneath ()->supports_stopped_by_hw_breakpoint ();
b2f4cfde
MM
2741}
2742
f6ac5f3d 2743/* The update_thread_list method of target record-btrace. */
e2887aa3 2744
f6ac5f3d
PA
2745void
2746record_btrace_target::update_thread_list ()
e2887aa3 2747{
e8032dde 2748 /* We don't add or remove threads during replay. */
f6ac5f3d 2749 if (record_is_replaying (minus_one_ptid))
e2887aa3
MM
2750 return;
2751
2752 /* Forward the request. */
b6a8c27b 2753 this->beneath ()->update_thread_list ();
e2887aa3
MM
2754}
2755
f6ac5f3d 2756/* The thread_alive method of target record-btrace. */
e2887aa3 2757
57810aa7 2758bool
f6ac5f3d 2759record_btrace_target::thread_alive (ptid_t ptid)
e2887aa3
MM
2760{
2761 /* We don't add or remove threads during replay. */
f6ac5f3d 2762 if (record_is_replaying (minus_one_ptid))
00431a78 2763 return true;
e2887aa3
MM
2764
2765 /* Forward the request. */
b6a8c27b 2766 return this->beneath ()->thread_alive (ptid);
e2887aa3
MM
2767}
2768
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Updates the cached registers and stop PC and reprints the frame so the
   user sees the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;
      registers_changed_thread (tp);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  inferior_thread ()->suspend.stop_pc
    = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2800
f6ac5f3d 2801/* The goto_record_begin method of target record-btrace. */
066ce621 2802
f6ac5f3d
PA
2803void
2804record_btrace_target::goto_record_begin ()
066ce621
MM
2805{
2806 struct thread_info *tp;
2807 struct btrace_insn_iterator begin;
2808
2809 tp = require_btrace_thread ();
2810
2811 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2812
2813 /* Skip gaps at the beginning of the trace. */
2814 while (btrace_insn_get (&begin) == NULL)
2815 {
2816 unsigned int steps;
2817
2818 steps = btrace_insn_next (&begin, 1);
2819 if (steps == 0)
2820 error (_("No trace."));
2821 }
2822
066ce621 2823 record_btrace_set_replay (tp, &begin);
066ce621
MM
2824}
2825
f6ac5f3d 2826/* The goto_record_end method of target record-btrace. */
066ce621 2827
f6ac5f3d
PA
2828void
2829record_btrace_target::goto_record_end ()
066ce621
MM
2830{
2831 struct thread_info *tp;
2832
2833 tp = require_btrace_thread ();
2834
2835 record_btrace_set_replay (tp, NULL);
066ce621
MM
2836}
2837
f6ac5f3d 2838/* The goto_record method of target record-btrace. */
066ce621 2839
f6ac5f3d
PA
2840void
2841record_btrace_target::goto_record (ULONGEST insn)
066ce621
MM
2842{
2843 struct thread_info *tp;
2844 struct btrace_insn_iterator it;
2845 unsigned int number;
2846 int found;
2847
2848 number = insn;
2849
2850 /* Check for wrap-arounds. */
2851 if (number != insn)
2852 error (_("Instruction number out of range."));
2853
2854 tp = require_btrace_thread ();
2855
2856 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2857
2858 /* Check if the instruction could not be found or is a gap. */
2859 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2860 error (_("No such instruction."));
2861
2862 record_btrace_set_replay (tp, &it);
066ce621
MM
2863}
2864
f6ac5f3d 2865/* The record_stop_replaying method of target record-btrace. */
797094dd 2866
f6ac5f3d
PA
2867void
2868record_btrace_target::record_stop_replaying ()
797094dd 2869{
08036331 2870 for (thread_info *tp : all_non_exited_threads ())
797094dd
MM
2871 record_btrace_stop_replaying (tp);
2872}
2873
f6ac5f3d 2874/* The execution_direction target method. */
70ad5bff 2875
f6ac5f3d
PA
2876enum exec_direction_kind
2877record_btrace_target::execution_direction ()
70ad5bff
MM
2878{
2879 return record_btrace_resume_exec_dir;
2880}
2881
f6ac5f3d 2882/* The prepare_to_generate_core target method. */
aef92902 2883
f6ac5f3d
PA
2884void
2885record_btrace_target::prepare_to_generate_core ()
aef92902
MM
2886{
2887 record_btrace_generating_corefile = 1;
2888}
2889
f6ac5f3d 2890/* The done_generating_core target method. */
aef92902 2891
f6ac5f3d
PA
2892void
2893record_btrace_target::done_generating_core ()
aef92902
MM
2894{
2895 record_btrace_generating_corefile = 0;
2896}
2897
/* Start recording in BTS format.  */

static void
cmd_record_btrace_bts_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the configured format so a failed enable does not leave a
	 stale BTS selection behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
2918
/* Start recording in Intel Processor Trace format.  */

static void
cmd_record_btrace_pt_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      /* Reset the configured format so a failed enable does not leave a
	 stale PT selection behind.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw;
    }
}
2939
/* Alias for "target record".  */

static void
cmd_record_btrace_start (const char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Prefer Intel PT; fall back to BTS if enabling PT fails.  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  try
    {
      execute_command ("target record-btrace", from_tty);
    }
  catch (const gdb_exception &exception)
    {
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      try
	{
	  execute_command ("target record-btrace", from_tty);
	}
      catch (const gdb_exception &ex)
	{
	  /* Neither format could be enabled; reset the configuration and
	     report the BTS failure.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw;
	}
    }
}
2969
67b5c0c1
MM
2970/* The "show record btrace replay-memory-access" command. */
2971
2972static void
2973cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2974 struct cmd_list_element *c, const char *value)
2975{
2976 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2977 replay_memory_access);
2978}
2979
4a4495d6
MM
2980/* The "set record btrace cpu none" command. */
2981
2982static void
2983cmd_set_record_btrace_cpu_none (const char *args, int from_tty)
2984{
2985 if (args != nullptr && *args != 0)
2986 error (_("Trailing junk: '%s'."), args);
2987
2988 record_btrace_cpu_state = CS_NONE;
2989}
2990
2991/* The "set record btrace cpu auto" command. */
2992
2993static void
2994cmd_set_record_btrace_cpu_auto (const char *args, int from_tty)
2995{
2996 if (args != nullptr && *args != 0)
2997 error (_("Trailing junk: '%s'."), args);
2998
2999 record_btrace_cpu_state = CS_AUTO;
3000}
3001
3002/* The "set record btrace cpu" command. */
3003
3004static void
3005cmd_set_record_btrace_cpu (const char *args, int from_tty)
3006{
3007 if (args == nullptr)
3008 args = "";
3009
3010 /* We use a hard-coded vendor string for now. */
3011 unsigned int family, model, stepping;
3012 int l1, l2, matches = sscanf (args, "intel: %u/%u%n/%u%n", &family,
3013 &model, &l1, &stepping, &l2);
3014 if (matches == 3)
3015 {
3016 if (strlen (args) != l2)
3017 error (_("Trailing junk: '%s'."), args + l2);
3018 }
3019 else if (matches == 2)
3020 {
3021 if (strlen (args) != l1)
3022 error (_("Trailing junk: '%s'."), args + l1);
3023
3024 stepping = 0;
3025 }
3026 else
3027 error (_("Bad format. See \"help set record btrace cpu\"."));
3028
3029 if (USHRT_MAX < family)
3030 error (_("Cpu family too big."));
3031
3032 if (UCHAR_MAX < model)
3033 error (_("Cpu model too big."));
3034
3035 if (UCHAR_MAX < stepping)
3036 error (_("Cpu stepping too big."));
3037
3038 record_btrace_cpu.vendor = CV_INTEL;
3039 record_btrace_cpu.family = family;
3040 record_btrace_cpu.model = model;
3041 record_btrace_cpu.stepping = stepping;
3042
3043 record_btrace_cpu_state = CS_CPU;
3044}
3045
/* The "show record btrace cpu" command.  */

static void
cmd_show_record_btrace_cpu (const char *args, int from_tty)
{
  if (args != nullptr && *args != 0)
    error (_("Trailing junk: '%s'."), args);

  switch (record_btrace_cpu_state)
    {
    case CS_AUTO:
      printf_unfiltered (_("btrace cpu is 'auto'.\n"));
      return;

    case CS_NONE:
      printf_unfiltered (_("btrace cpu is 'none'.\n"));
      return;

    case CS_CPU:
      switch (record_btrace_cpu.vendor)
	{
	case CV_INTEL:
	  if (record_btrace_cpu.stepping == 0)
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model);
	  else
	    printf_unfiltered (_("btrace cpu is 'intel: %u/%u/%u'.\n"),
			       record_btrace_cpu.family,
			       record_btrace_cpu.model,
			       record_btrace_cpu.stepping);
	  return;
	}
    }

  /* Reached only if the state or vendor enum holds an unknown value.  */
  error (_("Internal error: bad cpu state."));
}
3083
b20a6524
MM
3084/* The "record bts buffer-size" show value function. */
3085
3086static void
3087show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3088 struct cmd_list_element *c,
3089 const char *value)
3090{
3091 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3092 value);
3093}
3094
3095/* The "record pt buffer-size" show value function. */
3096
3097static void
3098show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3099 struct cmd_list_element *c,
3100 const char *value)
3101{
3102 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3103 value);
3104}
3105
afedecd3
MM
3106/* Initialize btrace commands. */
3107
6c265988 3108void _initialize_record_btrace ();
afedecd3 3109void
6c265988 3110_initialize_record_btrace ()
afedecd3 3111{
f4abbc16
MM
3112 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3113 _("Start branch trace recording."), &record_btrace_cmdlist,
3114 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3115 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3116
f4abbc16
MM
3117 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3118 _("\
3119Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3120The processor stores a from/to record for each branch into a cyclic buffer.\n\
3121This format may not be available on all processors."),
3122 &record_btrace_cmdlist);
3123 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3124
b20a6524
MM
3125 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3126 _("\
bc504a31 3127Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3128This format may not be available on all processors."),
3129 &record_btrace_cmdlist);
3130 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3131
0743fc83
TT
3132 add_basic_prefix_cmd ("btrace", class_support,
3133 _("Set record options."), &set_record_btrace_cmdlist,
3134 "set record btrace ", 0, &set_record_cmdlist);
67b5c0c1 3135
0743fc83
TT
3136 add_show_prefix_cmd ("btrace", class_support,
3137 _("Show record options."), &show_record_btrace_cmdlist,
3138 "show record btrace ", 0, &show_record_cmdlist);
67b5c0c1
MM
3139
3140 add_setshow_enum_cmd ("replay-memory-access", no_class,
3141 replay_memory_access_types, &replay_memory_access, _("\
3142Set what memory accesses are allowed during replay."), _("\
3143Show what memory accesses are allowed during replay."),
3144 _("Default is READ-ONLY.\n\n\
3145The btrace record target does not trace data.\n\
3146The memory therefore corresponds to the live target and not \
3147to the current replay position.\n\n\
3148When READ-ONLY, allow accesses to read-only memory during replay.\n\
3149When READ-WRITE, allow accesses to read-only and read-write memory during \
3150replay."),
3151 NULL, cmd_show_replay_memory_access,
3152 &set_record_btrace_cmdlist,
3153 &show_record_btrace_cmdlist);
3154
4a4495d6
MM
3155 add_prefix_cmd ("cpu", class_support, cmd_set_record_btrace_cpu,
3156 _("\
3157Set the cpu to be used for trace decode.\n\n\
55063ddb
TT
3158The format is \"VENDOR:IDENTIFIER\" or \"none\" or \"auto\" (default).\n\
3159For vendor \"intel\" the format is \"FAMILY/MODEL[/STEPPING]\".\n\n\
4a4495d6
MM
3160When decoding branch trace, enable errata workarounds for the specified cpu.\n\
3161The default is \"auto\", which uses the cpu on which the trace was recorded.\n\
3162When GDB does not support that cpu, this option can be used to enable\n\
3163workarounds for a similar cpu that GDB supports.\n\n\
3164When set to \"none\", errata workarounds are disabled."),
3165 &set_record_btrace_cpu_cmdlist,
590042fc 3166 "set record btrace cpu ", 1,
4a4495d6
MM
3167 &set_record_btrace_cmdlist);
3168
3169 add_cmd ("auto", class_support, cmd_set_record_btrace_cpu_auto, _("\
3170Automatically determine the cpu to be used for trace decode."),
3171 &set_record_btrace_cpu_cmdlist);
3172
3173 add_cmd ("none", class_support, cmd_set_record_btrace_cpu_none, _("\
3174Do not enable errata workarounds for trace decode."),
3175 &set_record_btrace_cpu_cmdlist);
3176
3177 add_cmd ("cpu", class_support, cmd_show_record_btrace_cpu, _("\
3178Show the cpu to be used for trace decode."),
3179 &show_record_btrace_cmdlist);
3180
0743fc83
TT
3181 add_basic_prefix_cmd ("bts", class_support,
3182 _("Set record btrace bts options."),
3183 &set_record_btrace_bts_cmdlist,
3184 "set record btrace bts ", 0,
3185 &set_record_btrace_cmdlist);
d33501a5 3186
0743fc83
TT
3187 add_show_prefix_cmd ("bts", class_support,
3188 _("Show record btrace bts options."),
3189 &show_record_btrace_bts_cmdlist,
3190 "show record btrace bts ", 0,
3191 &show_record_btrace_cmdlist);
d33501a5
MM
3192
3193 add_setshow_uinteger_cmd ("buffer-size", no_class,
3194 &record_btrace_conf.bts.size,
3195 _("Set the record/replay bts buffer size."),
3196 _("Show the record/replay bts buffer size."), _("\
3197When starting recording request a trace buffer of this size. \
3198The actual buffer size may differ from the requested size. \
3199Use \"info record\" to see the actual buffer size.\n\n\
3200Bigger buffers allow longer recording but also take more time to process \
3201the recorded execution trace.\n\n\
b20a6524
MM
3202The trace buffer size may not be changed while recording."), NULL,
3203 show_record_bts_buffer_size_value,
d33501a5
MM
3204 &set_record_btrace_bts_cmdlist,
3205 &show_record_btrace_bts_cmdlist);
3206
0743fc83
TT
3207 add_basic_prefix_cmd ("pt", class_support,
3208 _("Set record btrace pt options."),
3209 &set_record_btrace_pt_cmdlist,
3210 "set record btrace pt ", 0,
3211 &set_record_btrace_cmdlist);
3212
3213 add_show_prefix_cmd ("pt", class_support,
3214 _("Show record btrace pt options."),
3215 &show_record_btrace_pt_cmdlist,
3216 "show record btrace pt ", 0,
3217 &show_record_btrace_cmdlist);
b20a6524
MM
3218
3219 add_setshow_uinteger_cmd ("buffer-size", no_class,
3220 &record_btrace_conf.pt.size,
3221 _("Set the record/replay pt buffer size."),
3222 _("Show the record/replay pt buffer size."), _("\
3223Bigger buffers allow longer recording but also take more time to process \
3224the recorded execution.\n\
3225The actual buffer size may differ from the requested size. Use \"info record\" \
3226to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3227 &set_record_btrace_pt_cmdlist,
3228 &show_record_btrace_pt_cmdlist);
3229
d9f719f1 3230 add_target (record_btrace_target_info, record_btrace_target_open);
0b722aec
MM
3231
3232 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3233 xcalloc, xfree);
d33501a5
MM
3234
3235 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3236 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3237}