afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
e2882c85 3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
47/* A new thread observer enabling branch tracing for the new thread. */
48static struct observer *record_btrace_thread_observer;
49
67b5c0c1
MM
50/* Memory access types used in set/show record btrace replay-memory-access. */
51static const char replay_memory_access_read_only[] = "read-only";
52static const char replay_memory_access_read_write[] = "read-write";
53static const char *const replay_memory_access_types[] =
54{
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58};
59
60/* The currently allowed replay memory access type. */
61static const char *replay_memory_access = replay_memory_access_read_only;
62
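/* Illustrative usage, not part of the original source: REPLAY_MEMORY_ACCESS
   is switched via the command named in the comment above, e.g.

     (gdb) set record btrace replay-memory-access read-write

   which lets memory writes reach the target even while replaying, instead
   of the default read-only behavior.  */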
63/* Command lists for "set/show record btrace". */
64static struct cmd_list_element *set_record_btrace_cmdlist;
65static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 66
70ad5bff
MM
67/* The execution direction of the last resume we got. See record-full.c. */
68static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70/* The async event handler for reverse/replay execution. */
71static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
aef92902
MM
73/* A flag indicating that we are currently generating a core file. */
74static int record_btrace_generating_corefile;
75
f4abbc16
MM
76/* The current branch trace configuration. */
77static struct btrace_config record_btrace_conf;
78
79/* Command list for "record btrace". */
80static struct cmd_list_element *record_btrace_cmdlist;
81
d33501a5
MM
82/* Command lists for "set/show record btrace bts". */
83static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
b20a6524
MM
86/* Command lists for "set/show record btrace pt". */
87static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
afedecd3
MM
90/* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93#define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
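/* Illustrative example, not part of the original source: because the body is
   a single do/while (0) statement, the macro remains safe in an unbraced
   if/else, e.g.

     if (resumed)
       DEBUG ("resumed %s", target_pid_to_str (tp->ptid));
     else
       handle_stop ();

   Without the wrapper, the semicolon after the expansion would terminate the
   if statement and leave the else branch dangling.  (RESUMED and handle_stop
   are hypothetical names used only for illustration.)  */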
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
afedecd3
MM
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
066ce621
MM
109static struct thread_info *
110require_btrace_thread (void)
afedecd3
MM
111{
112 struct thread_info *tp;
afedecd3
MM
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
cd4007e4
MM
120 validate_registers_access ();
121
afedecd3
MM
122 btrace_fetch (tp);
123
6e07b1d2 124 if (btrace_is_empty (tp))
afedecd3
MM
125 error (_("No trace."));
126
066ce621
MM
127 return tp;
128}
129
130/* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136static struct btrace_thread_info *
137require_btrace (void)
138{
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
afedecd3
MM
144}
145
146/* Enable branch tracing for one thread. Warn on errors. */
147
148static void
149record_btrace_enable_warn (struct thread_info *tp)
150{
492d29ea
PA
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
afedecd3
MM
160}
161
162/* Callback function to disable branch tracing for one thread. */
163
164static void
165record_btrace_disable_callback (void *arg)
166{
19ba03f4 167 struct thread_info *tp = (struct thread_info *) arg;
afedecd3
MM
168
169 btrace_disable (tp);
170}
171
172/* Enable automatic tracing of new threads. */
173
174static void
175record_btrace_auto_enable (void)
176{
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181}
182
183/* Disable automatic tracing of new threads. */
184
185static void
186record_btrace_auto_disable (void)
187{
 188 /* The observer may have been detached already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196}
197
70ad5bff
MM
198/* The record-btrace async event handler function. */
199
200static void
201record_btrace_handle_async_inferior_event (gdb_client_data data)
202{
203 inferior_event_handler (INF_REG_EVENT, NULL);
204}
205
c0272db5
TW
206/* See record-btrace.h. */
207
208void
209record_btrace_push_target (void)
210{
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224}
225
afedecd3
MM
226/* The to_open method of target record-btrace. */
227
228static void
014f9477 229record_btrace_open (const char *args, int from_tty)
afedecd3
MM
230{
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
8213266a 236 record_preopen ();
afedecd3
MM
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
afedecd3
MM
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 244 ALL_NON_EXITED_THREADS (tp)
5d5658a1 245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 246 {
f4abbc16 247 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
c0272db5 252 record_btrace_push_target ();
afedecd3
MM
253
254 discard_cleanups (disable_chain);
255}
256
257/* The to_stop_recording method of target record-btrace. */
258
259static void
c6cd7c02 260record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
261{
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
034f788c 268 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271}
272
c0272db5
TW
273/* The to_disconnect method of target record-btrace. */
274
275static void
276record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278{
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286}
287
afedecd3
MM
288/* The to_close method of target record-btrace. */
289
290static void
de90e03d 291record_btrace_close (struct target_ops *self)
afedecd3 292{
568e808b
MM
293 struct thread_info *tp;
294
70ad5bff
MM
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
99c819ee
MM
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
568e808b
MM
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
034f788c 304 ALL_NON_EXITED_THREADS (tp)
568e808b 305 btrace_teardown (tp);
afedecd3
MM
306}
307
b7d2e916
PA
308/* The to_async method of target record-btrace. */
309
310static void
6a3753b3 311record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 312{
6a3753b3 313 if (enable)
b7d2e916
PA
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
6a3753b3 318 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
319}
320
d33501a5
MM
 321/* Adjusts the size and returns a human-readable size suffix. */
322
323static const char *
324record_btrace_adjust_size (unsigned int *size)
325{
326 unsigned int sz;
327
328 sz = *size;
329
330 if ((sz & ((1u << 30) - 1)) == 0)
331 {
332 *size = sz >> 30;
333 return "GB";
334 }
335 else if ((sz & ((1u << 20) - 1)) == 0)
336 {
337 *size = sz >> 20;
338 return "MB";
339 }
340 else if ((sz & ((1u << 10) - 1)) == 0)
341 {
342 *size = sz >> 10;
343 return "kB";
344 }
345 else
346 return "";
347}
348
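/* Worked example (illustrative): a buffer size of 2097152 (2 << 20) is not a
   multiple of 1 << 30 but is a multiple of 1 << 20, so *SIZE becomes 2 and
   "MB" is returned; 65536 yields 64 and "kB"; 768 matches no case and is
   returned unchanged with an empty suffix.  */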
349/* Print a BTS configuration. */
350
351static void
352record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353{
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363}
364
bc504a31 365/* Print an Intel Processor Trace configuration. */
b20a6524
MM
366
367static void
368record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369{
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379}
380
d33501a5
MM
381/* Print a branch tracing configuration. */
382
383static void
384record_btrace_print_conf (const struct btrace_config *conf)
385{
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
b20a6524
MM
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
d33501a5
MM
401 }
402
 403 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
404}
405
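/* Example output (illustrative), combining the messages above for a BTS
   configuration with a 64kB buffer:

     Recording format: <name from btrace_format_string>.
     Buffer size: 64kB.

   For BTRACE_FORMAT_NONE only the first line is printed.  */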
afedecd3
MM
406/* The to_info_record method of target record-btrace. */
407
408static void
630d6a4a 409record_btrace_info (struct target_ops *self)
afedecd3
MM
410{
411 struct btrace_thread_info *btinfo;
f4abbc16 412 const struct btrace_config *conf;
afedecd3 413 struct thread_info *tp;
31fd9caa 414 unsigned int insns, calls, gaps;
afedecd3
MM
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
cd4007e4
MM
422 validate_registers_access ();
423
f4abbc16
MM
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
d33501a5 428 record_btrace_print_conf (conf);
f4abbc16 429
afedecd3
MM
430 btrace_fetch (tp);
431
23a7fe75
MM
432 insns = 0;
433 calls = 0;
31fd9caa 434 gaps = 0;
23a7fe75 435
6e07b1d2 436 if (!btrace_is_empty (tp))
23a7fe75
MM
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
5de9129b 443 calls = btrace_call_number (&call);
23a7fe75
MM
444
445 btrace_insn_end (&insn, btinfo);
5de9129b 446 insns = btrace_insn_number (&insn);
31fd9caa 447
69090cee
TW
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
451 insns -= 1;
31fd9caa
MM
452
453 gaps = btinfo->ngaps;
23a7fe75 454 }
afedecd3 455
31fd9caa 456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
459
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
afedecd3
MM
463}
464
31fd9caa
MM
465/* Print a decode error. */
466
467static void
468btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470{
508352a9 471 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 472
112e8700 473 uiout->text (_("["));
508352a9
TW
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 476 {
112e8700
SM
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
31fd9caa 480 }
112e8700
SM
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
31fd9caa
MM
483}
484
afedecd3
MM
485/* Print an unsigned int. */
486
487static void
488ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
489{
112e8700 490 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
491}
492
f94cc897
MM
493/* A range of source lines. */
494
495struct btrace_line_range
496{
497 /* The symtab this line is from. */
498 struct symtab *symtab;
499
500 /* The first line (inclusive). */
501 int begin;
502
503 /* The last line (exclusive). */
504 int end;
505};
506
507/* Construct a line range. */
508
509static struct btrace_line_range
510btrace_mk_line_range (struct symtab *symtab, int begin, int end)
511{
512 struct btrace_line_range range;
513
514 range.symtab = symtab;
515 range.begin = begin;
516 range.end = end;
517
518 return range;
519}
520
521/* Add a line to a line range. */
522
523static struct btrace_line_range
524btrace_line_range_add (struct btrace_line_range range, int line)
525{
526 if (range.end <= range.begin)
527 {
528 /* This is the first entry. */
529 range.begin = line;
530 range.end = line + 1;
531 }
532 else if (line < range.begin)
533 range.begin = line;
534 else if (range.end < line)
535 range.end = line;
536
537 return range;
538}
539
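/* Worked example (illustrative): adding line 42 to an empty range yields
   begin = 42, end = 43; adding line 40 afterwards only lowers begin,
   giving the range [40, 43).  */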
540/* Return non-zero if RANGE is empty, zero otherwise. */
541
542static int
543btrace_line_range_is_empty (struct btrace_line_range range)
544{
545 return range.end <= range.begin;
546}
547
548/* Return non-zero if LHS contains RHS, zero otherwise. */
549
550static int
551btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
553{
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
557}
558
559/* Find the line range associated with PC. */
560
561static struct btrace_line_range
562btrace_find_line_range (CORE_ADDR pc)
563{
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
568 int nlines, i;
569
570 symtab = find_pc_line_symtab (pc);
571 if (symtab == NULL)
572 return btrace_mk_line_range (NULL, 0, 0);
573
574 ltable = SYMTAB_LINETABLE (symtab);
575 if (ltable == NULL)
576 return btrace_mk_line_range (symtab, 0, 0);
577
578 nlines = ltable->nitems;
579 lines = ltable->item;
580 if (nlines <= 0)
581 return btrace_mk_line_range (symtab, 0, 0);
582
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
585 {
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
588 }
589
590 return range;
591}
592
593/* Print source lines in LINES to UIOUT.
594
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
601
602static void
603btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
604 struct cleanup **ui_item_chain, int flags)
605{
8d297bbf 606 print_source_lines_flags psl_flags;
f94cc897
MM
607 int line;
608
609 psl_flags = 0;
610 if (flags & DISASSEMBLY_FILENAME)
611 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
612
613 for (line = lines.begin; line < lines.end; ++line)
614 {
615 if (*ui_item_chain != NULL)
616 do_cleanups (*ui_item_chain);
617
618 *ui_item_chain
619 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
620
621 print_source_lines (lines.symtab, line, line + 1, psl_flags);
622
623 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
624 }
625}
626
afedecd3
MM
627/* Disassemble a section of the recorded instruction trace. */
628
629static void
23a7fe75 630btrace_insn_history (struct ui_out *uiout,
31fd9caa 631 const struct btrace_thread_info *btinfo,
23a7fe75 632 const struct btrace_insn_iterator *begin,
9a24775b
PA
633 const struct btrace_insn_iterator *end,
634 gdb_disassembly_flags flags)
afedecd3 635{
f94cc897 636 struct cleanup *cleanups, *ui_item_chain;
afedecd3 637 struct gdbarch *gdbarch;
23a7fe75 638 struct btrace_insn_iterator it;
f94cc897 639 struct btrace_line_range last_lines;
afedecd3 640
9a24775b
PA
641 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
642 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 643
f94cc897
MM
644 flags |= DISASSEMBLY_SPECULATIVE;
645
afedecd3 646 gdbarch = target_gdbarch ();
f94cc897
MM
647 last_lines = btrace_mk_line_range (NULL, 0, 0);
648
187808b0 649 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
f94cc897
MM
650
651 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
652 instructions corresponding to that line. */
653 ui_item_chain = NULL;
afedecd3 654
8b172ce7
PA
655 gdb_pretty_print_disassembler disasm (gdbarch);
656
23a7fe75 657 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 658 {
23a7fe75
MM
659 const struct btrace_insn *insn;
660
661 insn = btrace_insn_get (&it);
662
31fd9caa
MM
663 /* A NULL instruction indicates a gap in the trace. */
664 if (insn == NULL)
665 {
666 const struct btrace_config *conf;
667
668 conf = btrace_conf (btinfo);
afedecd3 669
31fd9caa
MM
670 /* We have trace so we must have a configuration. */
671 gdb_assert (conf != NULL);
672
69090cee
TW
673 uiout->field_fmt ("insn-number", "%u",
674 btrace_insn_number (&it));
675 uiout->text ("\t");
676
677 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
678 conf->format);
679 }
680 else
681 {
f94cc897 682 struct disasm_insn dinsn;
da8c46d2 683
f94cc897 684 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 685 {
f94cc897
MM
686 struct btrace_line_range lines;
687
688 lines = btrace_find_line_range (insn->pc);
689 if (!btrace_line_range_is_empty (lines)
690 && !btrace_line_range_contains_range (last_lines, lines))
691 {
692 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
693 last_lines = lines;
694 }
695 else if (ui_item_chain == NULL)
696 {
697 ui_item_chain
698 = make_cleanup_ui_out_tuple_begin_end (uiout,
699 "src_and_asm_line");
700 /* No source information. */
701 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
702 }
703
704 gdb_assert (ui_item_chain != NULL);
da8c46d2 705 }
da8c46d2 706
f94cc897
MM
707 memset (&dinsn, 0, sizeof (dinsn));
708 dinsn.number = btrace_insn_number (&it);
709 dinsn.addr = insn->pc;
31fd9caa 710
da8c46d2 711 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 712 dinsn.is_speculative = 1;
da8c46d2 713
8b172ce7 714 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 715 }
afedecd3 716 }
f94cc897
MM
717
718 do_cleanups (cleanups);
afedecd3
MM
719}
720
721/* The to_insn_history method of target record-btrace. */
722
723static void
9a24775b
PA
724record_btrace_insn_history (struct target_ops *self, int size,
725 gdb_disassembly_flags flags)
afedecd3
MM
726{
727 struct btrace_thread_info *btinfo;
23a7fe75
MM
728 struct btrace_insn_history *history;
729 struct btrace_insn_iterator begin, end;
afedecd3 730 struct ui_out *uiout;
23a7fe75 731 unsigned int context, covered;
afedecd3
MM
732
733 uiout = current_uiout;
2e783024 734 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 735 context = abs (size);
afedecd3
MM
736 if (context == 0)
737 error (_("Bad record instruction-history-size."));
738
23a7fe75
MM
739 btinfo = require_btrace ();
740 history = btinfo->insn_history;
741 if (history == NULL)
afedecd3 742 {
07bbe694 743 struct btrace_insn_iterator *replay;
afedecd3 744
9a24775b 745 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 746
07bbe694
MM
747 /* If we're replaying, we start at the replay position. Otherwise, we
748 start at the tail of the trace. */
749 replay = btinfo->replay;
750 if (replay != NULL)
751 begin = *replay;
752 else
753 btrace_insn_end (&begin, btinfo);
754
755 /* We start from here and expand in the requested direction. Then we
756 expand in the other direction, as well, to fill up any remaining
757 context. */
758 end = begin;
759 if (size < 0)
760 {
761 /* We want the current position covered, as well. */
762 covered = btrace_insn_next (&end, 1);
763 covered += btrace_insn_prev (&begin, context - covered);
764 covered += btrace_insn_next (&end, context - covered);
765 }
766 else
767 {
768 covered = btrace_insn_next (&end, context);
769 covered += btrace_insn_prev (&begin, context - covered);
770 }
afedecd3
MM
771 }
772 else
773 {
23a7fe75
MM
774 begin = history->begin;
775 end = history->end;
afedecd3 776
9a24775b 777 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 778 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 779
23a7fe75
MM
780 if (size < 0)
781 {
782 end = begin;
783 covered = btrace_insn_prev (&begin, context);
784 }
785 else
786 {
787 begin = end;
788 covered = btrace_insn_next (&end, context);
789 }
afedecd3
MM
790 }
791
23a7fe75 792 if (covered > 0)
31fd9caa 793 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
794 else
795 {
796 if (size < 0)
797 printf_unfiltered (_("At the start of the branch trace record.\n"));
798 else
799 printf_unfiltered (_("At the end of the branch trace record.\n"));
800 }
afedecd3 801
23a7fe75 802 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
803}
804
805/* The to_insn_history_range method of target record-btrace. */
806
807static void
4e99c6b7 808record_btrace_insn_history_range (struct target_ops *self,
9a24775b
PA
809 ULONGEST from, ULONGEST to,
810 gdb_disassembly_flags flags)
afedecd3
MM
811{
812 struct btrace_thread_info *btinfo;
23a7fe75 813 struct btrace_insn_iterator begin, end;
afedecd3 814 struct ui_out *uiout;
23a7fe75
MM
815 unsigned int low, high;
816 int found;
afedecd3
MM
817
818 uiout = current_uiout;
2e783024 819 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
820 low = from;
821 high = to;
afedecd3 822
9a24775b 823 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
afedecd3
MM
824
825 /* Check for wrap-arounds. */
23a7fe75 826 if (low != from || high != to)
afedecd3
MM
827 error (_("Bad range."));
828
0688d04e 829 if (high < low)
afedecd3
MM
830 error (_("Bad range."));
831
23a7fe75 832 btinfo = require_btrace ();
afedecd3 833
23a7fe75
MM
834 found = btrace_find_insn_by_number (&begin, btinfo, low);
835 if (found == 0)
836 error (_("Range out of bounds."));
afedecd3 837
23a7fe75
MM
838 found = btrace_find_insn_by_number (&end, btinfo, high);
839 if (found == 0)
0688d04e
MM
840 {
841 /* Silently truncate the range. */
842 btrace_insn_end (&end, btinfo);
843 }
844 else
845 {
846 /* We want both begin and end to be inclusive. */
847 btrace_insn_next (&end, 1);
848 }
afedecd3 849
31fd9caa 850 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 851 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
852}
853
854/* The to_insn_history_from method of target record-btrace. */
855
856static void
9abc3ff3 857record_btrace_insn_history_from (struct target_ops *self,
9a24775b
PA
858 ULONGEST from, int size,
859 gdb_disassembly_flags flags)
afedecd3
MM
860{
861 ULONGEST begin, end, context;
862
863 context = abs (size);
0688d04e
MM
864 if (context == 0)
865 error (_("Bad record instruction-history-size."));
afedecd3
MM
866
867 if (size < 0)
868 {
869 end = from;
870
871 if (from < context)
872 begin = 0;
873 else
0688d04e 874 begin = from - context + 1;
afedecd3
MM
875 }
876 else
877 {
878 begin = from;
0688d04e 879 end = from + context - 1;
afedecd3
MM
880
881 /* Check for wrap-around. */
882 if (end < begin)
883 end = ULONGEST_MAX;
884 }
885
4e99c6b7 886 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
887}
888
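/* Worked example (illustrative): FROM = 100 with SIZE = -20 yields
   END = 100 and BEGIN = 100 - 20 + 1 = 81, i.e. the twenty instructions up
   to and including instruction 100; SIZE = 20 instead yields the inclusive
   range [100, 119].  */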
889/* Print the instruction number range for a function call history line. */
890
891static void
23a7fe75
MM
892btrace_call_history_insn_range (struct ui_out *uiout,
893 const struct btrace_function *bfun)
afedecd3 894{
7acbe133
MM
895 unsigned int begin, end, size;
896
0860c437 897 size = bfun->insn.size ();
7acbe133 898 gdb_assert (size > 0);
afedecd3 899
23a7fe75 900 begin = bfun->insn_offset;
7acbe133 901 end = begin + size - 1;
afedecd3 902
23a7fe75 903 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 904 uiout->text (",");
23a7fe75 905 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
906}
907
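/* For example (illustrative): a function segment with insn_offset = 10
   containing three instructions prints "10,12", the inclusive range of
   instruction numbers covered by that segment.  */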
ce0dfbea
MM
908/* Compute the lowest and highest source line for the instructions in BFUN
909 and return them in PBEGIN and PEND.
910 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
911 result from inlining or macro expansion. */
912
913static void
914btrace_compute_src_line_range (const struct btrace_function *bfun,
915 int *pbegin, int *pend)
916{
ce0dfbea
MM
917 struct symtab *symtab;
918 struct symbol *sym;
ce0dfbea
MM
919 int begin, end;
920
921 begin = INT_MAX;
922 end = INT_MIN;
923
924 sym = bfun->sym;
925 if (sym == NULL)
926 goto out;
927
928 symtab = symbol_symtab (sym);
929
0860c437 930 for (const btrace_insn &insn : bfun->insn)
ce0dfbea
MM
931 {
932 struct symtab_and_line sal;
933
0860c437 934 sal = find_pc_line (insn.pc, 0);
ce0dfbea
MM
935 if (sal.symtab != symtab || sal.line == 0)
936 continue;
937
325fac50
PA
938 begin = std::min (begin, sal.line);
939 end = std::max (end, sal.line);
ce0dfbea
MM
940 }
941
942 out:
943 *pbegin = begin;
944 *pend = end;
945}
946
afedecd3
MM
947/* Print the source line information for a function call history line. */
948
949static void
23a7fe75
MM
950btrace_call_history_src_line (struct ui_out *uiout,
951 const struct btrace_function *bfun)
afedecd3
MM
952{
953 struct symbol *sym;
23a7fe75 954 int begin, end;
afedecd3
MM
955
956 sym = bfun->sym;
957 if (sym == NULL)
958 return;
959
112e8700 960 uiout->field_string ("file",
08be3fe3 961 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 962
ce0dfbea 963 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 964 if (end < begin)
afedecd3
MM
965 return;
966
112e8700
SM
967 uiout->text (":");
968 uiout->field_int ("min line", begin);
afedecd3 969
23a7fe75 970 if (end == begin)
afedecd3
MM
971 return;
972
112e8700
SM
973 uiout->text (",");
974 uiout->field_int ("max line", end);
afedecd3
MM
975}
976
0b722aec
MM
977/* Get the name of a branch trace function. */
978
979static const char *
980btrace_get_bfun_name (const struct btrace_function *bfun)
981{
982 struct minimal_symbol *msym;
983 struct symbol *sym;
984
985 if (bfun == NULL)
986 return "??";
987
988 msym = bfun->msym;
989 sym = bfun->sym;
990
991 if (sym != NULL)
992 return SYMBOL_PRINT_NAME (sym);
993 else if (msym != NULL)
efd66ac6 994 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
995 else
996 return "??";
997}
998
afedecd3
MM
999/* Disassemble a section of the recorded function trace. */
1000
1001static void
23a7fe75 1002btrace_call_history (struct ui_out *uiout,
8710b709 1003 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1004 const struct btrace_call_iterator *begin,
1005 const struct btrace_call_iterator *end,
8d297bbf 1006 int int_flags)
afedecd3 1007{
23a7fe75 1008 struct btrace_call_iterator it;
8d297bbf 1009 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1010
8d297bbf 1011 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1012 btrace_call_number (end));
afedecd3 1013
23a7fe75 1014 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1015 {
23a7fe75
MM
1016 const struct btrace_function *bfun;
1017 struct minimal_symbol *msym;
1018 struct symbol *sym;
1019
1020 bfun = btrace_call_get (&it);
23a7fe75 1021 sym = bfun->sym;
0b722aec 1022 msym = bfun->msym;
23a7fe75 1023
afedecd3 1024 /* Print the function index. */
23a7fe75 1025 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1026 uiout->text ("\t");
afedecd3 1027
31fd9caa
MM
1028 /* Indicate gaps in the trace. */
1029 if (bfun->errcode != 0)
1030 {
1031 const struct btrace_config *conf;
1032
1033 conf = btrace_conf (btinfo);
1034
1035 /* We have trace so we must have a configuration. */
1036 gdb_assert (conf != NULL);
1037
1038 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1039
1040 continue;
1041 }
1042
8710b709
MM
1043 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1044 {
1045 int level = bfun->level + btinfo->level, i;
1046
1047 for (i = 0; i < level; ++i)
112e8700 1048 uiout->text (" ");
8710b709
MM
1049 }
1050
1051 if (sym != NULL)
112e8700 1052 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1053 else if (msym != NULL)
112e8700
SM
1054 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1055 else if (!uiout->is_mi_like_p ())
1056 uiout->field_string ("function", "??");
8710b709 1057
1e038f67 1058 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1059 {
112e8700 1060 uiout->text (_("\tinst "));
23a7fe75 1061 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1062 }
1063
1e038f67 1064 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1065 {
112e8700 1066 uiout->text (_("\tat "));
23a7fe75 1067 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1068 }
1069
112e8700 1070 uiout->text ("\n");
afedecd3
MM
1071 }
1072}
1073
1074/* The to_call_history method of target record-btrace. */
1075
1076static void
8d297bbf 1077record_btrace_call_history (struct target_ops *self, int size, int int_flags)
afedecd3
MM
1078{
1079 struct btrace_thread_info *btinfo;
23a7fe75
MM
1080 struct btrace_call_history *history;
1081 struct btrace_call_iterator begin, end;
afedecd3 1082 struct ui_out *uiout;
23a7fe75 1083 unsigned int context, covered;
8d297bbf 1084 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1085
1086 uiout = current_uiout;
2e783024 1087 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1088 context = abs (size);
afedecd3
MM
1089 if (context == 0)
1090 error (_("Bad record function-call-history-size."));
1091
23a7fe75
MM
1092 btinfo = require_btrace ();
1093 history = btinfo->call_history;
1094 if (history == NULL)
afedecd3 1095 {
07bbe694 1096 struct btrace_insn_iterator *replay;
afedecd3 1097
8d297bbf 1098 DEBUG ("call-history (0x%x): %d", int_flags, size);
afedecd3 1099
07bbe694
MM
1100 /* If we're replaying, we start at the replay position. Otherwise, we
1101 start at the tail of the trace. */
1102 replay = btinfo->replay;
1103 if (replay != NULL)
1104 {
07bbe694 1105 begin.btinfo = btinfo;
a0f1b963 1106 begin.index = replay->call_index;
07bbe694
MM
1107 }
1108 else
1109 btrace_call_end (&begin, btinfo);
1110
1111 /* We start from here and expand in the requested direction. Then we
1112 expand in the other direction, as well, to fill up any remaining
1113 context. */
1114 end = begin;
1115 if (size < 0)
1116 {
1117 /* We want the current position covered, as well. */
1118 covered = btrace_call_next (&end, 1);
1119 covered += btrace_call_prev (&begin, context - covered);
1120 covered += btrace_call_next (&end, context - covered);
1121 }
1122 else
1123 {
1124 covered = btrace_call_next (&end, context);
 1125 covered += btrace_call_prev (&begin, context - covered);
1126 }
afedecd3
MM
1127 }
1128 else
1129 {
23a7fe75
MM
1130 begin = history->begin;
1131 end = history->end;
afedecd3 1132
8d297bbf 1133 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
23a7fe75 1134 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1135
23a7fe75
MM
1136 if (size < 0)
1137 {
1138 end = begin;
1139 covered = btrace_call_prev (&begin, context);
1140 }
1141 else
1142 {
1143 begin = end;
1144 covered = btrace_call_next (&end, context);
1145 }
afedecd3
MM
1146 }
1147
23a7fe75 1148 if (covered > 0)
8710b709 1149 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1150 else
1151 {
1152 if (size < 0)
1153 printf_unfiltered (_("At the start of the branch trace record.\n"));
1154 else
1155 printf_unfiltered (_("At the end of the branch trace record.\n"));
1156 }
afedecd3 1157
23a7fe75 1158 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1159}
1160
1161/* The to_call_history_range method of target record-btrace. */
1162
1163static void
f0d960ea 1164record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1165 ULONGEST from, ULONGEST to,
1166 int int_flags)
afedecd3
MM
1167{
1168 struct btrace_thread_info *btinfo;
23a7fe75 1169 struct btrace_call_iterator begin, end;
afedecd3 1170 struct ui_out *uiout;
23a7fe75
MM
1171 unsigned int low, high;
1172 int found;
8d297bbf 1173 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1174
1175 uiout = current_uiout;
2e783024 1176 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1177 low = from;
1178 high = to;
afedecd3 1179
8d297bbf 1180 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
afedecd3
MM
1181
1182 /* Check for wrap-arounds. */
23a7fe75 1183 if (low != from || high != to)
afedecd3
MM
1184 error (_("Bad range."));
1185
0688d04e 1186 if (high < low)
afedecd3
MM
1187 error (_("Bad range."));
1188
23a7fe75 1189 btinfo = require_btrace ();
afedecd3 1190
23a7fe75
MM
1191 found = btrace_find_call_by_number (&begin, btinfo, low);
1192 if (found == 0)
1193 error (_("Range out of bounds."));
afedecd3 1194
23a7fe75
MM
1195 found = btrace_find_call_by_number (&end, btinfo, high);
1196 if (found == 0)
0688d04e
MM
1197 {
1198 /* Silently truncate the range. */
1199 btrace_call_end (&end, btinfo);
1200 }
1201 else
1202 {
1203 /* We want both begin and end to be inclusive. */
1204 btrace_call_next (&end, 1);
1205 }
afedecd3 1206
8710b709 1207 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1208 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1209}
1210
1211/* The to_call_history_from method of target record-btrace. */
1212
1213static void
ec0aea04 1214record_btrace_call_history_from (struct target_ops *self,
8d297bbf
PA
1215 ULONGEST from, int size,
1216 int int_flags)
afedecd3
MM
1217{
1218 ULONGEST begin, end, context;
8d297bbf 1219 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1220
1221 context = abs (size);
0688d04e
MM
1222 if (context == 0)
1223 error (_("Bad record function-call-history-size."));
afedecd3
MM
1224
1225 if (size < 0)
1226 {
1227 end = from;
1228
1229 if (from < context)
1230 begin = 0;
1231 else
0688d04e 1232 begin = from - context + 1;
afedecd3
MM
1233 }
1234 else
1235 {
1236 begin = from;
0688d04e 1237 end = from + context - 1;
afedecd3
MM
1238
1239 /* Check for wrap-around. */
1240 if (end < begin)
1241 end = ULONGEST_MAX;
1242 }
1243
f0d960ea 1244 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1245}
1246
b158a20f
TW
1247/* The to_record_method method of target record-btrace. */
1248
1249static enum record_method
1250record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1251{
b158a20f
TW
1252 struct thread_info * const tp = find_thread_ptid (ptid);
1253
1254 if (tp == NULL)
1255 error (_("No thread."));
1256
1257 if (tp->btrace.target == NULL)
1258 return RECORD_METHOD_NONE;
1259
1260 return RECORD_METHOD_BTRACE;
1261}
1262
07bbe694
MM
1263/* The to_record_is_replaying method of target record-btrace. */
1264
1265static int
a52eab48 1266record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1267{
1268 struct thread_info *tp;
1269
034f788c 1270 ALL_NON_EXITED_THREADS (tp)
a52eab48 1271 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1272 return 1;
1273
1274 return 0;
1275}
1276
7ff27e9b
MM
1277/* The to_record_will_replay method of target record-btrace. */
1278
1279static int
1280record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1281{
1282 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1283}
1284
633785ff
MM
1285/* The to_xfer_partial method of target record-btrace. */
1286
9b409511 1287static enum target_xfer_status
633785ff
MM
1288record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1289 const char *annex, gdb_byte *readbuf,
1290 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1291 ULONGEST len, ULONGEST *xfered_len)
633785ff 1292{
633785ff 1293 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1294 if (replay_memory_access == replay_memory_access_read_only
aef92902 1295 && !record_btrace_generating_corefile
4d10e986 1296 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1297 {
1298 switch (object)
1299 {
1300 case TARGET_OBJECT_MEMORY:
1301 {
1302 struct target_section *section;
1303
1304 /* We do not allow writing memory in general. */
1305 if (writebuf != NULL)
9b409511
YQ
1306 {
1307 *xfered_len = len;
bc113b4e 1308 return TARGET_XFER_UNAVAILABLE;
9b409511 1309 }
633785ff
MM
1310
1311 /* We allow reading readonly memory. */
1312 section = target_section_by_addr (ops, offset);
1313 if (section != NULL)
1314 {
1315 /* Check if the section we found is readonly. */
1316 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1317 section->the_bfd_section)
1318 & SEC_READONLY) != 0)
1319 {
1320 /* Truncate the request to fit into this section. */
325fac50 1321 len = std::min (len, section->endaddr - offset);
633785ff
MM
1322 break;
1323 }
1324 }
1325
9b409511 1326 *xfered_len = len;
bc113b4e 1327 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1328 }
1329 }
1330 }
1331
1332 /* Forward the request. */
e75fdfca
TT
1333 ops = ops->beneath;
1334 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1335 offset, len, xfered_len);
633785ff
MM
1336}
1337
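/* Illustrative summary, not part of the original source: while replaying
   with the default read-only setting, memory writes and reads outside
   SEC_READONLY sections report TARGET_XFER_UNAVAILABLE; reads that hit a
   read-only section are truncated to that section and forwarded to the
   target beneath, as are all requests when not replaying.  */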
1338/* The to_insert_breakpoint method of target record-btrace. */
1339
1340static int
1341record_btrace_insert_breakpoint (struct target_ops *ops,
1342 struct gdbarch *gdbarch,
1343 struct bp_target_info *bp_tgt)
1344{
67b5c0c1
MM
1345 const char *old;
1346 int ret;
633785ff
MM
1347
1348 /* Inserting breakpoints requires accessing memory. Allow it for the
1349 duration of this function. */
67b5c0c1
MM
1350 old = replay_memory_access;
1351 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1352
1353 ret = 0;
492d29ea
PA
1354 TRY
1355 {
1356 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1357 }
492d29ea
PA
1358 CATCH (except, RETURN_MASK_ALL)
1359 {
6c63c96a 1360 replay_memory_access = old;
492d29ea
PA
1361 throw_exception (except);
1362 }
1363 END_CATCH
6c63c96a 1364 replay_memory_access = old;
633785ff
MM
1365
1366 return ret;
1367}
1368
1369/* The to_remove_breakpoint method of target record-btrace. */
1370
1371static int
1372record_btrace_remove_breakpoint (struct target_ops *ops,
1373 struct gdbarch *gdbarch,
73971819
PA
1374 struct bp_target_info *bp_tgt,
1375 enum remove_bp_reason reason)
633785ff 1376{
67b5c0c1
MM
1377 const char *old;
1378 int ret;
633785ff
MM
1379
1380 /* Removing breakpoints requires accessing memory. Allow it for the
1381 duration of this function. */
67b5c0c1
MM
1382 old = replay_memory_access;
1383 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1384
1385 ret = 0;
492d29ea
PA
1386 TRY
1387 {
73971819
PA
1388 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1389 reason);
492d29ea 1390 }
492d29ea
PA
1391 CATCH (except, RETURN_MASK_ALL)
1392 {
6c63c96a 1393 replay_memory_access = old;
492d29ea
PA
1394 throw_exception (except);
1395 }
1396 END_CATCH
6c63c96a 1397 replay_memory_access = old;
633785ff
MM
1398
1399 return ret;
1400}
1401
1f3ef581
MM
1402/* The to_fetch_registers method of target record-btrace. */
1403
1404static void
1405record_btrace_fetch_registers (struct target_ops *ops,
1406 struct regcache *regcache, int regno)
1407{
1408 struct btrace_insn_iterator *replay;
1409 struct thread_info *tp;
1410
bcc0c096 1411 tp = find_thread_ptid (regcache_get_ptid (regcache));
1f3ef581
MM
1412 gdb_assert (tp != NULL);
1413
1414 replay = tp->btrace.replay;
aef92902 1415 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1416 {
1417 const struct btrace_insn *insn;
1418 struct gdbarch *gdbarch;
1419 int pcreg;
1420
ac7936df 1421 gdbarch = regcache->arch ();
1f3ef581
MM
1422 pcreg = gdbarch_pc_regnum (gdbarch);
1423 if (pcreg < 0)
1424 return;
1425
1426 /* We can only provide the PC register. */
1427 if (regno >= 0 && regno != pcreg)
1428 return;
1429
1430 insn = btrace_insn_get (replay);
1431 gdb_assert (insn != NULL);
1432
1433 regcache_raw_supply (regcache, regno, &insn->pc);
1434 }
1435 else
1436 {
e75fdfca 1437 struct target_ops *t = ops->beneath;
1f3ef581 1438
e75fdfca 1439 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1440 }
1441}
1442
1443/* The to_store_registers method of target record-btrace. */
1444
1445static void
1446record_btrace_store_registers (struct target_ops *ops,
1447 struct regcache *regcache, int regno)
1448{
1449 struct target_ops *t;
1450
a52eab48 1451 if (!record_btrace_generating_corefile
bcc0c096 1452 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
4d10e986 1453 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1454
1455 gdb_assert (may_write_registers != 0);
1456
e75fdfca
TT
1457 t = ops->beneath;
1458 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1459}
1460
1461/* The to_prepare_to_store method of target record-btrace. */
1462
1463static void
1464record_btrace_prepare_to_store (struct target_ops *ops,
1465 struct regcache *regcache)
1466{
1467 struct target_ops *t;
1468
a52eab48 1469 if (!record_btrace_generating_corefile
bcc0c096 1470 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1f3ef581
MM
1471 return;
1472
e75fdfca
TT
1473 t = ops->beneath;
1474 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1475}
1476
0b722aec
MM
1477/* The branch trace frame cache. */
1478
1479struct btrace_frame_cache
1480{
1481 /* The thread. */
1482 struct thread_info *tp;
1483
1484 /* The frame info. */
1485 struct frame_info *frame;
1486
1487 /* The branch trace function segment. */
1488 const struct btrace_function *bfun;
1489};
1490
1491/* A struct btrace_frame_cache hash table indexed by NEXT. */
1492
1493static htab_t bfcache;
1494
1495/* hash_f for htab_create_alloc of bfcache. */
1496
1497static hashval_t
1498bfcache_hash (const void *arg)
1499{
19ba03f4
SM
1500 const struct btrace_frame_cache *cache
1501 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1502
1503 return htab_hash_pointer (cache->frame);
1504}
1505
1506/* eq_f for htab_create_alloc of bfcache. */
1507
1508static int
1509bfcache_eq (const void *arg1, const void *arg2)
1510{
19ba03f4
SM
1511 const struct btrace_frame_cache *cache1
1512 = (const struct btrace_frame_cache *) arg1;
1513 const struct btrace_frame_cache *cache2
1514 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1515
1516 return cache1->frame == cache2->frame;
1517}
1518
1519/* Create a new btrace frame cache. */
1520
1521static struct btrace_frame_cache *
1522bfcache_new (struct frame_info *frame)
1523{
1524 struct btrace_frame_cache *cache;
1525 void **slot;
1526
1527 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1528 cache->frame = frame;
1529
1530 slot = htab_find_slot (bfcache, cache, INSERT);
1531 gdb_assert (*slot == NULL);
1532 *slot = cache;
1533
1534 return cache;
1535}
1536
1537/* Extract the branch trace function from a branch trace frame. */
1538
1539static const struct btrace_function *
1540btrace_get_frame_function (struct frame_info *frame)
1541{
1542 const struct btrace_frame_cache *cache;
0b722aec
MM
1543 struct btrace_frame_cache pattern;
1544 void **slot;
1545
1546 pattern.frame = frame;
1547
1548 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1549 if (slot == NULL)
1550 return NULL;
1551
19ba03f4 1552 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1553 return cache->bfun;
1554}
1555
cecac1ab
MM
1556/* Implement stop_reason method for record_btrace_frame_unwind. */
1557
1558static enum unwind_stop_reason
1559record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1560 void **this_cache)
1561{
0b722aec
MM
1562 const struct btrace_frame_cache *cache;
1563 const struct btrace_function *bfun;
1564
19ba03f4 1565 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1566 bfun = cache->bfun;
1567 gdb_assert (bfun != NULL);
1568
42bfe59e 1569 if (bfun->up == 0)
0b722aec
MM
1570 return UNWIND_UNAVAILABLE;
1571
1572 return UNWIND_NO_REASON;
cecac1ab
MM
1573}
1574
1575/* Implement this_id method for record_btrace_frame_unwind. */
1576
1577static void
1578record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1579 struct frame_id *this_id)
1580{
0b722aec
MM
1581 const struct btrace_frame_cache *cache;
1582 const struct btrace_function *bfun;
4aeb0dfc 1583 struct btrace_call_iterator it;
0b722aec
MM
1584 CORE_ADDR code, special;
1585
19ba03f4 1586 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1587
1588 bfun = cache->bfun;
1589 gdb_assert (bfun != NULL);
1590
4aeb0dfc
TW
1591 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1592 bfun = btrace_call_get (&it);
0b722aec
MM
1593
1594 code = get_frame_func (this_frame);
1595 special = bfun->number;
1596
1597 *this_id = frame_id_build_unavailable_stack_special (code, special);
1598
1599 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1600 btrace_get_bfun_name (cache->bfun),
1601 core_addr_to_string_nz (this_id->code_addr),
1602 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1603}
1604
1605/* Implement prev_register method for record_btrace_frame_unwind. */
1606
1607static struct value *
1608record_btrace_frame_prev_register (struct frame_info *this_frame,
1609 void **this_cache,
1610 int regnum)
1611{
0b722aec
MM
1612 const struct btrace_frame_cache *cache;
1613 const struct btrace_function *bfun, *caller;
42bfe59e 1614 struct btrace_call_iterator it;
0b722aec
MM
1615 struct gdbarch *gdbarch;
1616 CORE_ADDR pc;
1617 int pcreg;
1618
1619 gdbarch = get_frame_arch (this_frame);
1620 pcreg = gdbarch_pc_regnum (gdbarch);
1621 if (pcreg < 0 || regnum != pcreg)
1622 throw_error (NOT_AVAILABLE_ERROR,
1623 _("Registers are not available in btrace record history"));
1624
19ba03f4 1625 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1626 bfun = cache->bfun;
1627 gdb_assert (bfun != NULL);
1628
42bfe59e 1629 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1630 throw_error (NOT_AVAILABLE_ERROR,
1631 _("No caller in btrace record history"));
1632
42bfe59e
TW
1633 caller = btrace_call_get (&it);
1634
0b722aec 1635 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1636 pc = caller->insn.front ().pc;
0b722aec
MM
1637 else
1638 {
0860c437 1639 pc = caller->insn.back ().pc;
0b722aec
MM
1640 pc += gdb_insn_length (gdbarch, pc);
1641 }
1642
1643 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1644 btrace_get_bfun_name (bfun), bfun->level,
1645 core_addr_to_string_nz (pc));
1646
1647 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1648}
1649
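/* Illustrative consequence, not part of the original source: requesting any
   register other than the PC while replaying throws NOT_AVAILABLE_ERROR
   ("Registers are not available in btrace record history"); the PC itself is
   reconstructed from the caller's first or last instruction as shown
   above.  */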
1650/* Implement sniffer method for record_btrace_frame_unwind. */
1651
1652static int
1653record_btrace_frame_sniffer (const struct frame_unwind *self,
1654 struct frame_info *this_frame,
1655 void **this_cache)
1656{
0b722aec
MM
1657 const struct btrace_function *bfun;
1658 struct btrace_frame_cache *cache;
cecac1ab 1659 struct thread_info *tp;
0b722aec 1660 struct frame_info *next;
cecac1ab
MM
1661
1662 /* THIS_FRAME does not contain a reference to its thread. */
1663 tp = find_thread_ptid (inferior_ptid);
1664 gdb_assert (tp != NULL);
1665
0b722aec
MM
1666 bfun = NULL;
1667 next = get_next_frame (this_frame);
1668 if (next == NULL)
1669 {
1670 const struct btrace_insn_iterator *replay;
1671
1672 replay = tp->btrace.replay;
1673 if (replay != NULL)
08c3f6d2 1674 bfun = &replay->btinfo->functions[replay->call_index];
0b722aec
MM
1675 }
1676 else
1677 {
1678 const struct btrace_function *callee;
42bfe59e 1679 struct btrace_call_iterator it;
0b722aec
MM
1680
1681 callee = btrace_get_frame_function (next);
42bfe59e
TW
1682 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1683 return 0;
1684
1685 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1686 return 0;
1687
1688 bfun = btrace_call_get (&it);
0b722aec
MM
1689 }
1690
1691 if (bfun == NULL)
1692 return 0;
1693
1694 DEBUG ("[frame] sniffed frame for %s on level %d",
1695 btrace_get_bfun_name (bfun), bfun->level);
1696
1697 /* This is our frame. Initialize the frame cache. */
1698 cache = bfcache_new (this_frame);
1699 cache->tp = tp;
1700 cache->bfun = bfun;
1701
1702 *this_cache = cache;
1703 return 1;
1704}
1705
1706/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1707
1708static int
1709record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1710 struct frame_info *this_frame,
1711 void **this_cache)
1712{
1713 const struct btrace_function *bfun, *callee;
1714 struct btrace_frame_cache *cache;
42bfe59e 1715 struct btrace_call_iterator it;
0b722aec 1716 struct frame_info *next;
42bfe59e 1717 struct thread_info *tinfo;
0b722aec
MM
1718
1719 next = get_next_frame (this_frame);
1720 if (next == NULL)
1721 return 0;
1722
1723 callee = btrace_get_frame_function (next);
1724 if (callee == NULL)
1725 return 0;
1726
1727 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1728 return 0;
1729
42bfe59e
TW
1730 tinfo = find_thread_ptid (inferior_ptid);
1731 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
0b722aec
MM
1732 return 0;
1733
42bfe59e
TW
1734 bfun = btrace_call_get (&it);
1735
0b722aec
MM
1736 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1737 btrace_get_bfun_name (bfun), bfun->level);
1738
1739 /* This is our frame. Initialize the frame cache. */
1740 cache = bfcache_new (this_frame);
42bfe59e 1741 cache->tp = tinfo;
0b722aec
MM
1742 cache->bfun = bfun;
1743
1744 *this_cache = cache;
1745 return 1;
1746}
1747
1748static void
1749record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1750{
1751 struct btrace_frame_cache *cache;
1752 void **slot;
1753
19ba03f4 1754 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1755
1756 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1757 gdb_assert (slot != NULL);
1758
1759 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1760}
1761
 1762/* btrace recording stores neither previous memory contents nor previous
 1763 stack frame contents.  Any unwinding would return erroneous results as
 1764 the stack contents no longer match the changed PC value restored from
 1765 history.  Therefore this unwinder reports any possibly unwound registers
 1766 as <unavailable>. */
1767
0b722aec 1768const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1769{
1770 NORMAL_FRAME,
1771 record_btrace_frame_unwind_stop_reason,
1772 record_btrace_frame_this_id,
1773 record_btrace_frame_prev_register,
1774 NULL,
0b722aec
MM
1775 record_btrace_frame_sniffer,
1776 record_btrace_frame_dealloc_cache
1777};
1778
1779const struct frame_unwind record_btrace_tailcall_frame_unwind =
1780{
1781 TAILCALL_FRAME,
1782 record_btrace_frame_unwind_stop_reason,
1783 record_btrace_frame_this_id,
1784 record_btrace_frame_prev_register,
1785 NULL,
1786 record_btrace_tailcall_frame_sniffer,
1787 record_btrace_frame_dealloc_cache
cecac1ab 1788};
b2f4cfde 1789
ac01945b
TT
1790/* Implement the to_get_unwinder method. */
1791
1792static const struct frame_unwind *
1793record_btrace_to_get_unwinder (struct target_ops *self)
1794{
1795 return &record_btrace_frame_unwind;
1796}
1797
1798/* Implement the to_get_tailcall_unwinder method. */
1799
1800static const struct frame_unwind *
1801record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1802{
1803 return &record_btrace_tailcall_frame_unwind;
1804}
1805
987e68b1
MM
1806/* Return a human-readable string for FLAG. */
1807
1808static const char *
1809btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1810{
1811 switch (flag)
1812 {
1813 case BTHR_STEP:
1814 return "step";
1815
1816 case BTHR_RSTEP:
1817 return "reverse-step";
1818
1819 case BTHR_CONT:
1820 return "cont";
1821
1822 case BTHR_RCONT:
1823 return "reverse-cont";
1824
1825 case BTHR_STOP:
1826 return "stop";
1827 }
1828
1829 return "<invalid>";
1830}
1831
52834460
MM
1832/* Indicate that TP should be resumed according to FLAG. */
1833
1834static void
1835record_btrace_resume_thread (struct thread_info *tp,
1836 enum btrace_thread_flag flag)
1837{
1838 struct btrace_thread_info *btinfo;
1839
43792cf0 1840 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1841 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1842
1843 btinfo = &tp->btrace;
1844
52834460
MM
1845 /* Fetch the latest branch trace. */
1846 btrace_fetch (tp);
1847
0ca912df
MM
1848 /* A resume request overwrites a preceding resume or stop request. */
1849 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1850 btinfo->flags |= flag;
1851}
1852
ec71cc2f
MM
1853/* Get the current frame for TP. */
1854
1855static struct frame_info *
1856get_thread_current_frame (struct thread_info *tp)
1857{
1858 struct frame_info *frame;
1859 ptid_t old_inferior_ptid;
1860 int executing;
1861
1862 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1863 old_inferior_ptid = inferior_ptid;
1864 inferior_ptid = tp->ptid;
1865
1866 /* Clear the executing flag to allow changes to the current frame.
1867 We are not actually running, yet. We just started a reverse execution
1868 command or a record goto command.
1869 For the latter, EXECUTING is false and this has no effect.
1870 For the former, EXECUTING is true and we're in to_wait, about to
1871 move the thread. Since we need to recompute the stack, we temporarily
 1872 set EXECUTING to false. */
1873 executing = is_executing (inferior_ptid);
1874 set_executing (inferior_ptid, 0);
1875
1876 frame = NULL;
1877 TRY
1878 {
1879 frame = get_current_frame ();
1880 }
1881 CATCH (except, RETURN_MASK_ALL)
1882 {
1883 /* Restore the previous execution state. */
1884 set_executing (inferior_ptid, executing);
1885
1886 /* Restore the previous inferior_ptid. */
1887 inferior_ptid = old_inferior_ptid;
1888
1889 throw_exception (except);
1890 }
1891 END_CATCH
1892
1893 /* Restore the previous execution state. */
1894 set_executing (inferior_ptid, executing);
1895
1896 /* Restore the previous inferior_ptid. */
1897 inferior_ptid = old_inferior_ptid;
1898
1899 return frame;
1900}
1901
52834460
MM
1902/* Start replaying a thread. */
1903
1904static struct btrace_insn_iterator *
1905record_btrace_start_replaying (struct thread_info *tp)
1906{
52834460
MM
1907 struct btrace_insn_iterator *replay;
1908 struct btrace_thread_info *btinfo;
52834460
MM
1909
1910 btinfo = &tp->btrace;
1911 replay = NULL;
1912
1913 /* We can't start replaying without trace. */
b54b03bd 1914 if (btinfo->functions.empty ())
52834460
MM
1915 return NULL;
1916
52834460
MM
1917 /* GDB stores the current frame_id when stepping in order to detect steps
1918 into subroutines.
1919 Since frames are computed differently when we're replaying, we need to
1920 recompute those stored frames and fix them up so we can still detect
1921 subroutines after we started replaying. */
492d29ea 1922 TRY
52834460
MM
1923 {
1924 struct frame_info *frame;
1925 struct frame_id frame_id;
1926 int upd_step_frame_id, upd_step_stack_frame_id;
1927
1928 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1929 frame = get_thread_current_frame (tp);
52834460
MM
1930 frame_id = get_frame_id (frame);
1931
1932 /* Check if we need to update any stepping-related frame id's. */
1933 upd_step_frame_id = frame_id_eq (frame_id,
1934 tp->control.step_frame_id);
1935 upd_step_stack_frame_id = frame_id_eq (frame_id,
1936 tp->control.step_stack_frame_id);
1937
1938 /* We start replaying at the end of the branch trace. This corresponds
1939 to the current instruction. */
8d749320 1940 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1941 btrace_insn_end (replay, btinfo);
1942
31fd9caa
MM
1943 /* Skip gaps at the end of the trace. */
1944 while (btrace_insn_get (replay) == NULL)
1945 {
1946 unsigned int steps;
1947
1948 steps = btrace_insn_prev (replay, 1);
1949 if (steps == 0)
1950 error (_("No trace."));
1951 }
1952
52834460
MM
1953 /* We're not replaying, yet. */
1954 gdb_assert (btinfo->replay == NULL);
1955 btinfo->replay = replay;
1956
1957 /* Make sure we're not using any stale registers. */
1958 registers_changed_ptid (tp->ptid);
1959
1960 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1961 frame = get_thread_current_frame (tp);
52834460
MM
1962 frame_id = get_frame_id (frame);
1963
1964 /* Replace stepping related frames where necessary. */
1965 if (upd_step_frame_id)
1966 tp->control.step_frame_id = frame_id;
1967 if (upd_step_stack_frame_id)
1968 tp->control.step_stack_frame_id = frame_id;
1969 }
492d29ea 1970 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1971 {
1972 xfree (btinfo->replay);
1973 btinfo->replay = NULL;
1974
1975 registers_changed_ptid (tp->ptid);
1976
1977 throw_exception (except);
1978 }
492d29ea 1979 END_CATCH
52834460
MM
1980
1981 return replay;
1982}
1983
1984/* Stop replaying a thread. */
1985
1986static void
1987record_btrace_stop_replaying (struct thread_info *tp)
1988{
1989 struct btrace_thread_info *btinfo;
1990
1991 btinfo = &tp->btrace;
1992
1993 xfree (btinfo->replay);
1994 btinfo->replay = NULL;
1995
1996 /* Make sure we're not leaving any stale registers. */
1997 registers_changed_ptid (tp->ptid);
1998}
1999
e3cfc1c7
MM
2000/* Stop replaying TP if it is at the end of its execution history. */
2001
2002static void
2003record_btrace_stop_replaying_at_end (struct thread_info *tp)
2004{
2005 struct btrace_insn_iterator *replay, end;
2006 struct btrace_thread_info *btinfo;
2007
2008 btinfo = &tp->btrace;
2009 replay = btinfo->replay;
2010
2011 if (replay == NULL)
2012 return;
2013
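  /* A replay position at the very end of the trace corresponds to the live
     state of the thread, so replaying can simply be stopped there.  */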
2014 btrace_insn_end (&end, btinfo);
2015
2016 if (btrace_insn_cmp (replay, &end) == 0)
2017 record_btrace_stop_replaying (tp);
2018}
2019
b2f4cfde
MM
2020/* The to_resume method of target record-btrace. */
2021
2022static void
2023record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2024 enum gdb_signal signal)
2025{
0ca912df 2026 struct thread_info *tp;
d2939ba2 2027 enum btrace_thread_flag flag, cflag;
52834460 2028
987e68b1
MM
2029 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2030 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2031 step ? "step" : "cont");
52834460 2032
0ca912df
MM
2033 /* Store the execution direction of the last resume.
2034
2035 If there is more than one to_resume call, we have to rely on infrun
2036 to not change the execution direction in-between. */
70ad5bff
MM
2037 record_btrace_resume_exec_dir = execution_direction;
2038
0ca912df 2039 /* As long as we're not replaying, just forward the request.
52834460 2040
0ca912df
MM
2041 For non-stop targets this means that no thread is replaying. In order to
2042 make progress, we may need to explicitly move replaying threads to the end
2043 of their execution history. */
a52eab48
MM
2044 if ((execution_direction != EXEC_REVERSE)
2045 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2046 {
e75fdfca 2047 ops = ops->beneath;
04c4fe8c
MM
2048 ops->to_resume (ops, ptid, step, signal);
2049 return;
b2f4cfde
MM
2050 }
2051
52834460 2052 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2053 if (execution_direction == EXEC_REVERSE)
2054 {
2055 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2056 cflag = BTHR_RCONT;
2057 }
52834460 2058 else
d2939ba2
MM
2059 {
2060 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2061 cflag = BTHR_CONT;
2062 }
52834460 2063
52834460 2064 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2065 record_btrace_wait below.
2066
2067 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2068 if (!target_is_non_stop_p ())
2069 {
2070 gdb_assert (ptid_match (inferior_ptid, ptid));
2071
2072 ALL_NON_EXITED_THREADS (tp)
2073 if (ptid_match (tp->ptid, ptid))
2074 {
2075 if (ptid_match (tp->ptid, inferior_ptid))
2076 record_btrace_resume_thread (tp, flag);
2077 else
2078 record_btrace_resume_thread (tp, cflag);
2079 }
2080 }
2081 else
2082 {
2083 ALL_NON_EXITED_THREADS (tp)
2084 if (ptid_match (tp->ptid, ptid))
2085 record_btrace_resume_thread (tp, flag);
2086 }
70ad5bff
MM
2087
2088 /* Async support. */
2089 if (target_can_async_p ())
2090 {
6a3753b3 2091 target_async (1);
70ad5bff
MM
2092 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2093 }
52834460
MM
2094}
2095
85ad3aaf
PA
2096/* The to_commit_resume method of target record-btrace. */
2097
2098static void
2099record_btrace_commit_resume (struct target_ops *ops)
2100{
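  /* Forward the commit only while we are not replaying and not executing in
     reverse; in those cases resume requests are handled entirely within
     record_btrace_wait and there is nothing to commit beneath.  */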
2101 if ((execution_direction != EXEC_REVERSE)
2102 && !record_btrace_is_replaying (ops, minus_one_ptid))
2103 ops->beneath->to_commit_resume (ops->beneath);
2104}
2105
987e68b1
MM
2106/* Cancel resuming TP. */
2107
2108static void
2109record_btrace_cancel_resume (struct thread_info *tp)
2110{
2111 enum btrace_thread_flag flags;
2112
2113 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2114 if (flags == 0)
2115 return;
2116
43792cf0
PA
2117 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2118 print_thread_id (tp),
987e68b1
MM
2119 target_pid_to_str (tp->ptid), flags,
2120 btrace_thread_flag_to_str (flags));
2121
2122 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2123 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2124}
2125
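/* The btrace_step_* helpers below construct the target_waitstatus values
   that the stepping functions and record_btrace_wait report back.  */
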
2126/* Return a target_waitstatus indicating that we ran out of history. */
2127
2128static struct target_waitstatus
2129btrace_step_no_history (void)
2130{
2131 struct target_waitstatus status;
2132
2133 status.kind = TARGET_WAITKIND_NO_HISTORY;
2134
2135 return status;
2136}
2137
2138/* Return a target_waitstatus indicating that a step finished. */
2139
2140static struct target_waitstatus
2141btrace_step_stopped (void)
2142{
2143 struct target_waitstatus status;
2144
2145 status.kind = TARGET_WAITKIND_STOPPED;
2146 status.value.sig = GDB_SIGNAL_TRAP;
2147
2148 return status;
2149}
2150
6e4879f0
MM
2151/* Return a target_waitstatus indicating that a thread was stopped as
2152 requested. */
2153
2154static struct target_waitstatus
2155btrace_step_stopped_on_request (void)
2156{
2157 struct target_waitstatus status;
2158
2159 status.kind = TARGET_WAITKIND_STOPPED;
2160 status.value.sig = GDB_SIGNAL_0;
2161
2162 return status;
2163}
2164
d825d248
MM
2165/* Return a target_waitstatus indicating a spurious stop. */
2166
2167static struct target_waitstatus
2168btrace_step_spurious (void)
2169{
2170 struct target_waitstatus status;
2171
2172 status.kind = TARGET_WAITKIND_SPURIOUS;
2173
2174 return status;
2175}
2176
e3cfc1c7
MM
2177/* Return a target_waitstatus indicating that the thread was not resumed. */
2178
2179static struct target_waitstatus
2180btrace_step_no_resumed (void)
2181{
2182 struct target_waitstatus status;
2183
2184 status.kind = TARGET_WAITKIND_NO_RESUMED;
2185
2186 return status;
2187}
2188
2189/* Return a target_waitstatus indicating that we should wait again. */
2190
2191static struct target_waitstatus
2192btrace_step_again (void)
2193{
2194 struct target_waitstatus status;
2195
2196 status.kind = TARGET_WAITKIND_IGNORE;
2197
2198 return status;
2199}
2200
52834460
MM
2201/* Clear the record histories. */
2202
2203static void
2204record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2205{
2206 xfree (btinfo->insn_history);
2207 xfree (btinfo->call_history);
2208
2209 btinfo->insn_history = NULL;
2210 btinfo->call_history = NULL;
2211}
2212
3c615f99
MM
2213/* Check whether TP's current replay position is at a breakpoint. */
2214
2215static int
2216record_btrace_replay_at_breakpoint (struct thread_info *tp)
2217{
2218 struct btrace_insn_iterator *replay;
2219 struct btrace_thread_info *btinfo;
2220 const struct btrace_insn *insn;
2221 struct inferior *inf;
2222
2223 btinfo = &tp->btrace;
2224 replay = btinfo->replay;
2225
2226 if (replay == NULL)
2227 return 0;
2228
2229 insn = btrace_insn_get (replay);
2230 if (insn == NULL)
2231 return 0;
2232
2233 inf = find_inferior_ptid (tp->ptid);
2234 if (inf == NULL)
2235 return 0;
2236
2237 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2238 &btinfo->stop_reason);
2239}
2240
d825d248 2241/* Step one instruction in forward direction. */
52834460
MM
2242
2243static struct target_waitstatus
d825d248 2244record_btrace_single_step_forward (struct thread_info *tp)
52834460 2245{
b61ce85c 2246 struct btrace_insn_iterator *replay, end, start;
52834460 2247 struct btrace_thread_info *btinfo;
52834460 2248
d825d248
MM
2249 btinfo = &tp->btrace;
2250 replay = btinfo->replay;
2251
2252 /* We're done if we're not replaying. */
2253 if (replay == NULL)
2254 return btrace_step_no_history ();
2255
011c71b6
MM
2256 /* Check if we're stepping a breakpoint. */
2257 if (record_btrace_replay_at_breakpoint (tp))
2258 return btrace_step_stopped ();
2259
b61ce85c
MM
2260 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2261 jump back to the instruction at which we started. */
2262 start = *replay;
d825d248
MM
2263 do
2264 {
2265 unsigned int steps;
2266
e3cfc1c7
MM
2267 /* We will bail out here if we continue stepping after reaching the end
2268 of the execution history. */
d825d248
MM
2269 steps = btrace_insn_next (replay, 1);
2270 if (steps == 0)
b61ce85c
MM
2271 {
2272 *replay = start;
2273 return btrace_step_no_history ();
2274 }
d825d248
MM
2275 }
2276 while (btrace_insn_get (replay) == NULL);
2277
2278 /* Determine the end of the instruction trace. */
2279 btrace_insn_end (&end, btinfo);
2280
e3cfc1c7
MM
2281 /* The execution trace contains (and ends with) the current instruction.
2282 This instruction has not been executed, yet, so the trace really ends
2283 one instruction earlier. */
d825d248 2284 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2285 return btrace_step_no_history ();
d825d248
MM
2286
2287 return btrace_step_spurious ();
2288}
2289
2290/* Step one instruction in backward direction. */
2291
2292static struct target_waitstatus
2293record_btrace_single_step_backward (struct thread_info *tp)
2294{
b61ce85c 2295 struct btrace_insn_iterator *replay, start;
d825d248 2296 struct btrace_thread_info *btinfo;
e59fa00f 2297
52834460
MM
2298 btinfo = &tp->btrace;
2299 replay = btinfo->replay;
2300
d825d248
MM
2301 /* Start replaying if we're not already doing so. */
2302 if (replay == NULL)
2303 replay = record_btrace_start_replaying (tp);
2304
2305 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2306 Skip gaps during replay. If we end up at a gap (at the beginning of
2307 the trace), jump back to the instruction at which we started. */
2308 start = *replay;
d825d248
MM
2309 do
2310 {
2311 unsigned int steps;
2312
2313 steps = btrace_insn_prev (replay, 1);
2314 if (steps == 0)
b61ce85c
MM
2315 {
2316 *replay = start;
2317 return btrace_step_no_history ();
2318 }
d825d248
MM
2319 }
2320 while (btrace_insn_get (replay) == NULL);
2321
011c71b6
MM
2322 /* Check if we're stepping a breakpoint.
2323
2324 For reverse-stepping, this check is after the step. There is logic in
2325 infrun.c that handles reverse-stepping separately. See, for example,
2326 proceed and adjust_pc_after_break.
2327
2328 This code assumes that for reverse-stepping, PC points to the last
2329 de-executed instruction, whereas for forward-stepping PC points to the
2330 next to-be-executed instruction. */
2331 if (record_btrace_replay_at_breakpoint (tp))
2332 return btrace_step_stopped ();
2333
d825d248
MM
2334 return btrace_step_spurious ();
2335}
2336
2337/* Step a single thread. */
2338
2339static struct target_waitstatus
2340record_btrace_step_thread (struct thread_info *tp)
2341{
2342 struct btrace_thread_info *btinfo;
2343 struct target_waitstatus status;
2344 enum btrace_thread_flag flags;
2345
2346 btinfo = &tp->btrace;
2347
6e4879f0
MM
2348 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2349 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2350
43792cf0 2351 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2352 target_pid_to_str (tp->ptid), flags,
2353 btrace_thread_flag_to_str (flags));
52834460 2354
6e4879f0
MM
2355 /* We can't step without an execution history. */
2356 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2357 return btrace_step_no_history ();
2358
52834460
MM
2359 switch (flags)
2360 {
2361 default:
2362 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2363
6e4879f0
MM
2364 case BTHR_STOP:
2365 return btrace_step_stopped_on_request ();
2366
52834460 2367 case BTHR_STEP:
d825d248
MM
2368 status = record_btrace_single_step_forward (tp);
2369 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2370 break;
52834460
MM
2371
2372 return btrace_step_stopped ();
2373
2374 case BTHR_RSTEP:
d825d248
MM
2375 status = record_btrace_single_step_backward (tp);
2376 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2377 break;
52834460
MM
2378
2379 return btrace_step_stopped ();
2380
2381 case BTHR_CONT:
e3cfc1c7
MM
2382 status = record_btrace_single_step_forward (tp);
2383 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2384 break;
52834460 2385
e3cfc1c7
MM
2386 btinfo->flags |= flags;
2387 return btrace_step_again ();
52834460
MM
2388
2389 case BTHR_RCONT:
e3cfc1c7
MM
2390 status = record_btrace_single_step_backward (tp);
2391 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2392 break;
52834460 2393
e3cfc1c7
MM
2394 btinfo->flags |= flags;
2395 return btrace_step_again ();
2396 }
d825d248 2397
e3cfc1c7
MM
2398 /* We keep threads moving at the end of their execution history. The to_wait
2399 method will stop the thread for whom the event is reported. */
2400 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2401 btinfo->flags |= flags;
52834460 2402
e3cfc1c7 2403 return status;
b2f4cfde
MM
2404}
2405
e3cfc1c7
MM
2406/* A vector of threads. */
2407
2408typedef struct thread_info * tp_t;
2409DEF_VEC_P (tp_t);
2410
a6b5be76
MM
2411/* Announce further events if necessary. */
2412
2413static void
2414record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2415 const VEC (tp_t) *no_history)
2416{
2417 int more_moving, more_no_history;
2418
2419 more_moving = !VEC_empty (tp_t, moving);
2420 more_no_history = !VEC_empty (tp_t, no_history);
2421
2422 if (!more_moving && !more_no_history)
2423 return;
2424
2425 if (more_moving)
2426 DEBUG ("movers pending");
2427
2428 if (more_no_history)
2429 DEBUG ("no-history pending");
2430
2431 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2432}
2433
b2f4cfde
MM
2434/* The to_wait method of target record-btrace. */
2435
2436static ptid_t
2437record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2438 struct target_waitstatus *status, int options)
2439{
e3cfc1c7
MM
2440 VEC (tp_t) *moving, *no_history;
2441 struct thread_info *tp, *eventing;
2442 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
52834460
MM
2443
2444 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2445
b2f4cfde 2446 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2447 if ((execution_direction != EXEC_REVERSE)
2448 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2449 {
e75fdfca
TT
2450 ops = ops->beneath;
2451 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2452 }
2453
e3cfc1c7
MM
2454 moving = NULL;
2455 no_history = NULL;
2456
2457 make_cleanup (VEC_cleanup (tp_t), &moving);
2458 make_cleanup (VEC_cleanup (tp_t), &no_history);
2459
2460 /* Keep a work list of moving threads. */
2461 ALL_NON_EXITED_THREADS (tp)
2462 if (ptid_match (tp->ptid, ptid)
2463 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2464 VEC_safe_push (tp_t, moving, tp);
2465
2466 if (VEC_empty (tp_t, moving))
52834460 2467 {
e3cfc1c7 2468 *status = btrace_step_no_resumed ();
52834460 2469
e3cfc1c7 2470 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2471 target_waitstatus_to_string (status).c_str ());
e3cfc1c7
MM
2472
2473 do_cleanups (cleanups);
2474 return null_ptid;
52834460
MM
2475 }
2476
e3cfc1c7
MM
2477 /* Step moving threads one by one, one step each, until either one thread
2478 reports an event or we run out of threads to step.
2479
2480 When stepping more than one thread, chances are that some threads reach
2481 the end of their execution history earlier than others. If we reported
2482 this immediately, all-stop on top of non-stop would stop all threads and
2483 resume the same threads next time. And we would report the same thread
2484 having reached the end of its execution history again.
2485
2486 In the worst case, this would starve the other threads. But even if other
2487 threads would be allowed to make progress, this would result in far too
2488 many intermediate stops.
2489
2490 We therefore delay the reporting of "no execution history" until we have
2491 nothing else to report. By this time, all threads should have moved to
2492 either the beginning or the end of their execution history. There will
2493 be a single user-visible stop. */
2494 eventing = NULL;
2495 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2496 {
2497 unsigned int ix;
2498
2499 ix = 0;
2500 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2501 {
2502 *status = record_btrace_step_thread (tp);
2503
2504 switch (status->kind)
2505 {
2506 case TARGET_WAITKIND_IGNORE:
2507 ix++;
2508 break;
2509
2510 case TARGET_WAITKIND_NO_HISTORY:
2511 VEC_safe_push (tp_t, no_history,
2512 VEC_ordered_remove (tp_t, moving, ix));
2513 break;
2514
2515 default:
2516 eventing = VEC_unordered_remove (tp_t, moving, ix);
2517 break;
2518 }
2519 }
2520 }
2521
2522 if (eventing == NULL)
2523 {
2524 /* We started with at least one moving thread. This thread must have
2525 either stopped or reached the end of its execution history.
2526
2527 In the former case, EVENTING must not be NULL.
2528 In the latter case, NO_HISTORY must not be empty. */
2529 gdb_assert (!VEC_empty (tp_t, no_history));
2530
2531 /* We kept threads moving at the end of their execution history. Stop
2532 EVENTING now that we are going to report its stop. */
2533 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2534 eventing->btrace.flags &= ~BTHR_MOVE;
2535
2536 *status = btrace_step_no_history ();
2537 }
2538
2539 gdb_assert (eventing != NULL);
2540
2541 /* We kept threads replaying at the end of their execution history. Stop
2542 replaying EVENTING now that we are going to report its stop. */
2543 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2544
2545 /* Stop all other threads. */
5953356c 2546 if (!target_is_non_stop_p ())
e3cfc1c7
MM
2547 ALL_NON_EXITED_THREADS (tp)
2548 record_btrace_cancel_resume (tp);
52834460 2549
a6b5be76
MM
2550 /* In async mode, we need to announce further events. */
2551 if (target_is_async_p ())
2552 record_btrace_maybe_mark_async_event (moving, no_history);
2553
52834460 2554 /* Start record histories anew from the current position. */
e3cfc1c7 2555 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2556
2557 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2558 registers_changed_ptid (eventing->ptid);
2559
43792cf0
PA
2560 DEBUG ("wait ended by thread %s (%s): %s",
2561 print_thread_id (eventing),
e3cfc1c7 2562 target_pid_to_str (eventing->ptid),
23fdd69e 2563 target_waitstatus_to_string (status).c_str ());
52834460 2564
e3cfc1c7
MM
2565 do_cleanups (cleanups);
2566 return eventing->ptid;
52834460
MM
2567}
2568
6e4879f0
MM
2569/* The to_stop method of target record-btrace. */
2570
2571static void
2572record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2573{
2574 DEBUG ("stop %s", target_pid_to_str (ptid));
2575
2576 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2577 if ((execution_direction != EXEC_REVERSE)
2578 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2579 {
2580 ops = ops->beneath;
2581 ops->to_stop (ops, ptid);
2582 }
2583 else
2584 {
2585 struct thread_info *tp;
2586
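      /* Mark each matching thread as to-be-stopped.  The stop itself is
	 reported from record_btrace_wait via btrace_step_stopped_on_request.  */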
2587 ALL_NON_EXITED_THREADS (tp)
2588 if (ptid_match (tp->ptid, ptid))
2589 {
2590 tp->btrace.flags &= ~BTHR_MOVE;
2591 tp->btrace.flags |= BTHR_STOP;
2592 }
2593 }
2594}
2595
52834460
MM
2596/* The to_can_execute_reverse method of target record-btrace. */
2597
2598static int
19db3e69 2599record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2600{
2601 return 1;
2602}
2603
9e8915c6 2604/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2605
9e8915c6
PA
2606static int
2607record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2608{
a52eab48 2609 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2610 {
2611 struct thread_info *tp = inferior_thread ();
2612
2613 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2614 }
2615
2616 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2617}
2618
2619/* The to_supports_stopped_by_sw_breakpoint method of target
2620 record-btrace. */
2621
2622static int
2623record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2624{
a52eab48 2625 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2626 return 1;
2627
2628 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2629}
2630
2631/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2632
2633static int
2634record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2635{
a52eab48 2636 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2637 {
2638 struct thread_info *tp = inferior_thread ();
2639
2640 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2641 }
2642
2643 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2644}
2645
2646/* The to_supports_stopped_by_hw_breakpoint method of target
2647 record-btrace. */
2648
2649static int
2650record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2651{
a52eab48 2652 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2653 return 1;
52834460 2654
9e8915c6 2655 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2656}
2657
e8032dde 2658/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2659
2660static void
e8032dde 2661record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2662{
e8032dde 2663 /* We don't add or remove threads during replay. */
a52eab48 2664 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2665 return;
2666
2667 /* Forward the request. */
e75fdfca 2668 ops = ops->beneath;
e8032dde 2669 ops->to_update_thread_list (ops);
e2887aa3
MM
2670}
2671
2672/* The to_thread_alive method of target record-btrace. */
2673
2674static int
2675record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2676{
2677 /* We don't add or remove threads during replay. */
a52eab48 2678 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2679 return find_thread_ptid (ptid) != NULL;
2680
2681 /* Forward the request. */
e75fdfca
TT
2682 ops = ops->beneath;
2683 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2684}
2685
066ce621
MM
2686/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2687 is stopped. */
2688
2689static void
2690record_btrace_set_replay (struct thread_info *tp,
2691 const struct btrace_insn_iterator *it)
2692{
2693 struct btrace_thread_info *btinfo;
2694
2695 btinfo = &tp->btrace;
2696
a0f1b963 2697 if (it == NULL)
52834460 2698 record_btrace_stop_replaying (tp);
066ce621
MM
2699 else
2700 {
2701 if (btinfo->replay == NULL)
52834460 2702 record_btrace_start_replaying (tp);
066ce621
MM
2703 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2704 return;
2705
2706 *btinfo->replay = *it;
52834460 2707 registers_changed_ptid (tp->ptid);
066ce621
MM
2708 }
2709
52834460
MM
2710 /* Start anew from the new replay position. */
2711 record_btrace_clear_histories (btinfo);
485668e5
MM
2712
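  /* Update the cached stop PC and display the frame and source line at the
     new replay position, as after a normal stop.  */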
2713 stop_pc = regcache_read_pc (get_current_regcache ());
2714 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2715}
2716
2717/* The to_goto_record_begin method of target record-btrace. */
2718
2719static void
08475817 2720record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2721{
2722 struct thread_info *tp;
2723 struct btrace_insn_iterator begin;
2724
2725 tp = require_btrace_thread ();
2726
2727 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2728
2729 /* Skip gaps at the beginning of the trace. */
2730 while (btrace_insn_get (&begin) == NULL)
2731 {
2732 unsigned int steps;
2733
2734 steps = btrace_insn_next (&begin, 1);
2735 if (steps == 0)
2736 error (_("No trace."));
2737 }
2738
066ce621 2739 record_btrace_set_replay (tp, &begin);
066ce621
MM
2740}
2741
2742/* The to_goto_record_end method of target record-btrace. */
2743
2744static void
307a1b91 2745record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2746{
2747 struct thread_info *tp;
2748
2749 tp = require_btrace_thread ();
2750
2751 record_btrace_set_replay (tp, NULL);
066ce621
MM
2752}
2753
2754/* The to_goto_record method of target record-btrace. */
2755
2756static void
606183ac 2757record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2758{
2759 struct thread_info *tp;
2760 struct btrace_insn_iterator it;
2761 unsigned int number;
2762 int found;
2763
2764 number = insn;
2765
2766 /* Check for wrap-arounds. */
2767 if (number != insn)
2768 error (_("Instruction number out of range."));
2769
2770 tp = require_btrace_thread ();
2771
2772 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2773
2774 /* Check if the instruction could not be found or is a gap. */
2775 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2776 error (_("No such instruction."));
2777
2778 record_btrace_set_replay (tp, &it);
066ce621
MM
2779}
2780
797094dd
MM
2781/* The to_record_stop_replaying method of target record-btrace. */
2782
2783static void
2784record_btrace_stop_replaying_all (struct target_ops *self)
2785{
2786 struct thread_info *tp;
2787
2788 ALL_NON_EXITED_THREADS (tp)
2789 record_btrace_stop_replaying (tp);
2790}
2791
70ad5bff
MM
2792/* The to_execution_direction target method. */
2793
2794static enum exec_direction_kind
2795record_btrace_execution_direction (struct target_ops *self)
2796{
2797 return record_btrace_resume_exec_dir;
2798}
2799
aef92902
MM
2800/* The to_prepare_to_generate_core target method. */
2801
2802static void
2803record_btrace_prepare_to_generate_core (struct target_ops *self)
2804{
2805 record_btrace_generating_corefile = 1;
2806}
2807
2808/* The to_done_generating_core target method. */
2809
2810static void
2811record_btrace_done_generating_core (struct target_ops *self)
2812{
2813 record_btrace_generating_corefile = 0;
2814}
2815
afedecd3
MM
2816/* Initialize the record-btrace target ops. */
2817
2818static void
2819init_record_btrace_ops (void)
2820{
2821 struct target_ops *ops;
2822
2823 ops = &record_btrace_ops;
2824 ops->to_shortname = "record-btrace";
2825 ops->to_longname = "Branch tracing target";
2826 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2827 ops->to_open = record_btrace_open;
2828 ops->to_close = record_btrace_close;
b7d2e916 2829 ops->to_async = record_btrace_async;
afedecd3 2830 ops->to_detach = record_detach;
c0272db5 2831 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2832 ops->to_mourn_inferior = record_mourn_inferior;
2833 ops->to_kill = record_kill;
afedecd3
MM
2834 ops->to_stop_recording = record_btrace_stop_recording;
2835 ops->to_info_record = record_btrace_info;
2836 ops->to_insn_history = record_btrace_insn_history;
2837 ops->to_insn_history_from = record_btrace_insn_history_from;
2838 ops->to_insn_history_range = record_btrace_insn_history_range;
2839 ops->to_call_history = record_btrace_call_history;
2840 ops->to_call_history_from = record_btrace_call_history_from;
2841 ops->to_call_history_range = record_btrace_call_history_range;
b158a20f 2842 ops->to_record_method = record_btrace_record_method;
07bbe694 2843 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2844 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2845 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2846 ops->to_xfer_partial = record_btrace_xfer_partial;
2847 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2848 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2849 ops->to_fetch_registers = record_btrace_fetch_registers;
2850 ops->to_store_registers = record_btrace_store_registers;
2851 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2852 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2853 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2854 ops->to_resume = record_btrace_resume;
85ad3aaf 2855 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2856 ops->to_wait = record_btrace_wait;
6e4879f0 2857 ops->to_stop = record_btrace_stop;
e8032dde 2858 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2859 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2860 ops->to_goto_record_begin = record_btrace_goto_begin;
2861 ops->to_goto_record_end = record_btrace_goto_end;
2862 ops->to_goto_record = record_btrace_goto;
52834460 2863 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2864 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2865 ops->to_supports_stopped_by_sw_breakpoint
2866 = record_btrace_supports_stopped_by_sw_breakpoint;
2867 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2868 ops->to_supports_stopped_by_hw_breakpoint
2869 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2870 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2871 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2872 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2873 ops->to_stratum = record_stratum;
2874 ops->to_magic = OPS_MAGIC;
2875}
2876
f4abbc16
MM
2877/* Start recording in BTS format. */
2878
2879static void
cdb34d4a 2880cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2881{
f4abbc16
MM
2882 if (args != NULL && *args != 0)
2883 error (_("Invalid argument."));
2884
2885 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2886
492d29ea
PA
2887 TRY
2888 {
95a6b0a1 2889 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2890 }
2891 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2892 {
2893 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2894 throw_exception (exception);
2895 }
492d29ea 2896 END_CATCH
f4abbc16
MM
2897}
2898
bc504a31 2899/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2900
2901static void
cdb34d4a 2902cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2903{
2904 if (args != NULL && *args != 0)
2905 error (_("Invalid argument."));
2906
b20a6524 2907 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2908
492d29ea
PA
2909 TRY
2910 {
95a6b0a1 2911 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2912 }
2913 CATCH (exception, RETURN_MASK_ALL)
2914 {
2915 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2916 throw_exception (exception);
2917 }
2918 END_CATCH
afedecd3
MM
2919}
2920
b20a6524
MM
2921/* Alias for "target record". */
2922
2923static void
981a3fb3 2924cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2925{
2926 if (args != NULL && *args != 0)
2927 error (_("Invalid argument."));
2928
2929 record_btrace_conf.format = BTRACE_FORMAT_PT;
2930
2931 TRY
2932 {
95a6b0a1 2933 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2934 }
2935 CATCH (exception, RETURN_MASK_ALL)
2936 {
2937 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2938
2939 TRY
2940 {
95a6b0a1 2941 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2942 }
2943 CATCH (exception, RETURN_MASK_ALL)
2944 {
2945 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2946 throw_exception (exception);
2947 }
2948 END_CATCH
2949 }
2950 END_CATCH
2951}
2952
67b5c0c1
MM
2953/* The "set record btrace" command. */
2954
2955static void
981a3fb3 2956cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2957{
2958 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2959}
2960
2961/* The "show record btrace" command. */
2962
2963static void
981a3fb3 2964cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2965{
2966 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2967}
2968
2969/* The "show record btrace replay-memory-access" command. */
2970
2971static void
2972cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2973 struct cmd_list_element *c, const char *value)
2974{
2975 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2976 replay_memory_access);
2977}
2978
d33501a5
MM
2979/* The "set record btrace bts" command. */
2980
2981static void
981a3fb3 2982cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
2983{
2984 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 2985 "by an appropriate subcommand.\n"));
d33501a5
MM
2986 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2987 all_commands, gdb_stdout);
2988}
2989
2990/* The "show record btrace bts" command. */
2991
2992static void
981a3fb3 2993cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
2994{
2995 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2996}
2997
b20a6524
MM
2998/* The "set record btrace pt" command. */
2999
3000static void
981a3fb3 3001cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3002{
3003 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3004 "by an appropriate subcommand.\n"));
3005 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3006 all_commands, gdb_stdout);
3007}
3008
3009/* The "show record btrace pt" command. */
3010
3011static void
981a3fb3 3012cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3013{
3014 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3015}
3016
3017/* The "record bts buffer-size" show value function. */
3018
3019static void
3020show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3021 struct cmd_list_element *c,
3022 const char *value)
3023{
3024 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3025 value);
3026}
3027
3028/* The "record pt buffer-size" show value function. */
3029
3030static void
3031show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3032 struct cmd_list_element *c,
3033 const char *value)
3034{
3035 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3036 value);
3037}
3038
afedecd3
MM
3039/* Initialize btrace commands. */
3040
3041void
3042_initialize_record_btrace (void)
3043{
f4abbc16
MM
3044 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3045 _("Start branch trace recording."), &record_btrace_cmdlist,
3046 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3047 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3048
f4abbc16
MM
3049 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3050 _("\
3051Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3052The processor stores a from/to record for each branch into a cyclic buffer.\n\
3053This format may not be available on all processors."),
3054 &record_btrace_cmdlist);
3055 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3056
b20a6524
MM
3057 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3058 _("\
bc504a31 3059Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3060This format may not be available on all processors."),
3061 &record_btrace_cmdlist);
3062 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3063
67b5c0c1
MM
3064 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3065 _("Set record options"), &set_record_btrace_cmdlist,
3066 "set record btrace ", 0, &set_record_cmdlist);
3067
3068 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3069 _("Show record options"), &show_record_btrace_cmdlist,
3070 "show record btrace ", 0, &show_record_cmdlist);
3071
3072 add_setshow_enum_cmd ("replay-memory-access", no_class,
3073 replay_memory_access_types, &replay_memory_access, _("\
3074Set what memory accesses are allowed during replay."), _("\
3075Show what memory accesses are allowed during replay."),
3076 _("Default is READ-ONLY.\n\n\
3077The btrace record target does not trace data.\n\
3078The memory therefore corresponds to the live target and not \
3079to the current replay position.\n\n\
3080When READ-ONLY, allow accesses to read-only memory during replay.\n\
3081When READ-WRITE, allow accesses to read-only and read-write memory during \
3082replay."),
3083 NULL, cmd_show_replay_memory_access,
3084 &set_record_btrace_cmdlist,
3085 &show_record_btrace_cmdlist);
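
  /* Illustrative use of the setting registered above, assuming an active
     "record btrace" session:
       (gdb) reverse-stepi
       (gdb) set record btrace replay-memory-access read-write
     This also permits accesses to read-write memory while replaying.  */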
3086
d33501a5
MM
3087 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3088 _("Set record btrace bts options"),
3089 &set_record_btrace_bts_cmdlist,
3090 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3091
3092 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3093 _("Show record btrace bts options"),
3094 &show_record_btrace_bts_cmdlist,
3095 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3096
3097 add_setshow_uinteger_cmd ("buffer-size", no_class,
3098 &record_btrace_conf.bts.size,
3099 _("Set the record/replay bts buffer size."),
3100 _("Show the record/replay bts buffer size."), _("\
3101When starting recording request a trace buffer of this size. \
3102The actual buffer size may differ from the requested size. \
3103Use \"info record\" to see the actual buffer size.\n\n\
3104Bigger buffers allow longer recording but also take more time to process \
3105the recorded execution trace.\n\n\
b20a6524
MM
3106The trace buffer size may not be changed while recording."), NULL,
3107 show_record_bts_buffer_size_value,
d33501a5
MM
3108 &set_record_btrace_bts_cmdlist,
3109 &show_record_btrace_bts_cmdlist);
3110
b20a6524
MM
3111 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3112 _("Set record btrace pt options"),
3113 &set_record_btrace_pt_cmdlist,
3114 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3115
3116 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3117 _("Show record btrace pt options"),
3118 &show_record_btrace_pt_cmdlist,
3119 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3120
3121 add_setshow_uinteger_cmd ("buffer-size", no_class,
3122 &record_btrace_conf.pt.size,
3123 _("Set the record/replay pt buffer size."),
3124 _("Show the record/replay pt buffer size."), _("\
3125Bigger buffers allow longer recording but also take more time to process \
3126the recorded execution.\n\
3127The actual buffer size may differ from the requested size. Use \"info record\" \
3128to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3129 &set_record_btrace_pt_cmdlist,
3130 &show_record_btrace_pt_cmdlist);
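
  /* Illustrative example for the buffer-size settings registered above:
       (gdb) set record btrace pt buffer-size 65536
     The requested size is a hint; "info record" shows the actual size.  */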
3131
afedecd3
MM
3132 init_record_btrace_ops ();
3133 add_target (&record_btrace_ops);
0b722aec
MM
3134
3135 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3136 xcalloc, xfree);
d33501a5
MM
3137
3138 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3139 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3140}