/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2016 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

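/* For illustration (not part of the original source): the user switches
   between these values with the corresponding GDB commands, e.g.

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access

   Hypothetical session; the exact command output wording may differ.  */
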
/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)                                            \
  do                                                                   \
    {                                                                  \
      if (record_debug != 0)                                           \
        fprintf_unfiltered (gdb_stdlog,                                \
                            "[record-btrace] " msg "\n", ##args);      \
    }                                                                  \
  while (0)

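/* Illustrative sketch (not in the original source): without the do/while (0)
   wrapper the macro would expand to a bare block, and code such as

     if (cond)
       DEBUG ("msg");
     else
       other ();

   would either fail to compile or bind the else incorrectly.  Wrapping the
   body in do { ... } while (0) turns the expansion into a single statement
   that consumes the trailing semicolon.  */
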
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may have been detached, already.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
                                  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
        btrace_enable (tp, &record_btrace_conf);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
                          int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}

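/* Example (illustrative, not in the original source): a size of
   2 * 1024 * 1024 is a multiple of 1MB but not of 1GB, so it is adjusted to 2
   with suffix "MB"; a size of 1536 is not a multiple of 1kB and is returned
   unchanged with an empty suffix.  */
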
/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
                     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

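/* For illustration (not part of the original source): for a BTS configuration
   with a 64kB buffer, this prints the format name as reported by
   btrace_format_string followed by

     Buffer size: 64kB.

   since 64 * 1024 is a multiple of 1kB and record_btrace_adjust_size yields
   64 with suffix "kB".  */
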
/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
        {
          /* The last instruction does not really belong to the trace.  */
          insns -= 1;
        }
      else
        {
          unsigned int steps;

          /* Skip gaps at the end.  */
          do
            {
              steps = btrace_insn_prev (&insn, 1);
              if (steps == 0)
                break;

              insns = btrace_insn_number (&insn);
            }
          while (insns == 0);
        }

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
                       "for thread %s (%s).\n"), insns, calls, gaps,
                     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
                            enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
        {
        default:
          break;

        case BDE_BTS_OVERFLOW:
          errstr = _("instruction overflow");
          break;

        case BDE_BTS_INSN_SIZE:
          errstr = _("unknown instruction");
          break;
        }
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
        {
        case BDE_PT_USER_QUIT:
          is_error = 0;
          errstr = _("trace decode cancelled");
          break;

        case BDE_PT_DISABLED:
          is_error = 0;
          errstr = _("disabled");
          break;

        case BDE_PT_OVERFLOW:
          is_error = 0;
          errstr = _("overflow");
          break;

        default:
          if (errcode < 0)
            errstr = pt_errstr (pt_errcode (errcode));
          break;
        }
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
                                  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
          && (lhs.begin <= rhs.begin)
          && (rhs.end <= lhs.end));
}

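/* Illustrative note (not in the original source): ranges are half-open, so
   btrace_mk_line_range (symtab, 10, 12) covers lines 10 and 11, and a range
   is empty whenever end <= begin.  The first btrace_line_range_add call on an
   empty range therefore initializes both bounds.  */
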
/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
        range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
                    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
        do_cleanups (*ui_item_chain);

      *ui_item_chain
        = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
        {
          const struct btrace_config *conf;

          conf = btrace_conf (btinfo);

          /* We have trace so we must have a configuration.  */
          gdb_assert (conf != NULL);

          btrace_ui_out_decode_error (uiout, it.function->errcode,
                                      conf->format);
        }
      else
        {
          struct disasm_insn dinsn;

          if ((flags & DISASSEMBLY_SOURCE) != 0)
            {
              struct btrace_line_range lines;

              lines = btrace_find_line_range (insn->pc);
              if (!btrace_line_range_is_empty (lines)
                  && !btrace_line_range_contains_range (last_lines, lines))
                {
                  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
                  last_lines = lines;
                }
              else if (ui_item_chain == NULL)
                {
                  ui_item_chain
                    = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                           "src_and_asm_line");
                  /* No source information.  */
                  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
                }

              gdb_assert (ui_item_chain != NULL);
            }

          memset (&dinsn, 0, sizeof (dinsn));
          dinsn.number = btrace_insn_number (&it);
          dinsn.addr = insn->pc;

          if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
            dinsn.is_speculative = 1;

          gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
        }
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
                                 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}

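/* Example (illustrative, not in the original source): with FROM == 100 and
   SIZE == -10 the range handed to record_btrace_insn_history_range is
   [91; 100]; with FROM == 100 and SIZE == 10 it is [100; 109].  */
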
/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
                               int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
        continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

1066
afedecd3
MM
1067/* Disassemble a section of the recorded function trace. */
1068
1069static void
23a7fe75 1070btrace_call_history (struct ui_out *uiout,
8710b709 1071 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1072 const struct btrace_call_iterator *begin,
1073 const struct btrace_call_iterator *end,
8d297bbf 1074 int int_flags)
afedecd3 1075{
23a7fe75 1076 struct btrace_call_iterator it;
8d297bbf 1077 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1078
8d297bbf 1079 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1080 btrace_call_number (end));
afedecd3 1081
23a7fe75 1082 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1083 {
23a7fe75
MM
1084 const struct btrace_function *bfun;
1085 struct minimal_symbol *msym;
1086 struct symbol *sym;
1087
1088 bfun = btrace_call_get (&it);
23a7fe75 1089 sym = bfun->sym;
0b722aec 1090 msym = bfun->msym;
23a7fe75 1091
afedecd3 1092 /* Print the function index. */
23a7fe75 1093 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
1094 ui_out_text (uiout, "\t");
1095
31fd9caa
MM
1096 /* Indicate gaps in the trace. */
1097 if (bfun->errcode != 0)
1098 {
1099 const struct btrace_config *conf;
1100
1101 conf = btrace_conf (btinfo);
1102
1103 /* We have trace so we must have a configuration. */
1104 gdb_assert (conf != NULL);
1105
1106 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1107
1108 continue;
1109 }
1110
8710b709
MM
1111 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1112 {
1113 int level = bfun->level + btinfo->level, i;
1114
1115 for (i = 0; i < level; ++i)
1116 ui_out_text (uiout, " ");
1117 }
1118
1119 if (sym != NULL)
1120 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
1121 else if (msym != NULL)
efd66ac6 1122 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
8710b709
MM
1123 else if (!ui_out_is_mi_like_p (uiout))
1124 ui_out_field_string (uiout, "function", "??");
1125
1e038f67 1126 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1127 {
8710b709 1128 ui_out_text (uiout, _("\tinst "));
23a7fe75 1129 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1130 }
1131
1e038f67 1132 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1133 {
8710b709 1134 ui_out_text (uiout, _("\tat "));
23a7fe75 1135 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1136 }
1137
afedecd3
MM
1138 ui_out_text (uiout, "\n");
1139 }
1140}
1141
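/* For illustration (hypothetical output, not from the original source): with
   both RECORD_PRINT_INSN_RANGE and RECORD_PRINT_SRC_LINE set, a line of the
   function-call history output looks roughly like

     13	foo	inst 42,47	at src.c:10,11

   where "13" is the function index, "inst" gives the instruction number
   range, and "at" gives the source file and line range.  */
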
/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
                                  ULONGEST from, ULONGEST to,
                                  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
                                 ULONGEST from, int size,
                                 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_UNAVAILABLE;
          }
        }
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                               offset, len, xfered_len);
}

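/* Illustrative summary (not in the original source): while replaying with
   replay-memory-access set to "read-only", a memory write or a read that is
   not covered by a SEC_READONLY section yields TARGET_XFER_UNAVAILABLE; only
   reads from read-only sections, truncated to that section, are forwarded to
   the target beneath.  */
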
/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer match the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
         target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

52834460
MM
1956/* Start replaying a thread. */
1957
1958static struct btrace_insn_iterator *
1959record_btrace_start_replaying (struct thread_info *tp)
1960{
52834460
MM
1961 struct btrace_insn_iterator *replay;
1962 struct btrace_thread_info *btinfo;
52834460
MM
1963
1964 btinfo = &tp->btrace;
1965 replay = NULL;
1966
1967 /* We can't start replaying without trace. */
1968 if (btinfo->begin == NULL)
1969 return NULL;
1970
52834460
MM
 1971  /* GDB stores the current frame_id when stepping in order to detect steps
1972 into subroutines.
1973 Since frames are computed differently when we're replaying, we need to
1974 recompute those stored frames and fix them up so we can still detect
1975 subroutines after we started replaying. */
492d29ea 1976 TRY
52834460
MM
1977 {
1978 struct frame_info *frame;
1979 struct frame_id frame_id;
1980 int upd_step_frame_id, upd_step_stack_frame_id;
1981
1982 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1983 frame = get_thread_current_frame (tp);
52834460
MM
1984 frame_id = get_frame_id (frame);
1985
1986 /* Check if we need to update any stepping-related frame id's. */
1987 upd_step_frame_id = frame_id_eq (frame_id,
1988 tp->control.step_frame_id);
1989 upd_step_stack_frame_id = frame_id_eq (frame_id,
1990 tp->control.step_stack_frame_id);
1991
1992 /* We start replaying at the end of the branch trace. This corresponds
1993 to the current instruction. */
8d749320 1994 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1995 btrace_insn_end (replay, btinfo);
1996
31fd9caa
MM
1997 /* Skip gaps at the end of the trace. */
1998 while (btrace_insn_get (replay) == NULL)
1999 {
2000 unsigned int steps;
2001
2002 steps = btrace_insn_prev (replay, 1);
2003 if (steps == 0)
2004 error (_("No trace."));
2005 }
2006
52834460
MM
2007 /* We're not replaying, yet. */
2008 gdb_assert (btinfo->replay == NULL);
2009 btinfo->replay = replay;
2010
2011 /* Make sure we're not using any stale registers. */
2012 registers_changed_ptid (tp->ptid);
2013
2014 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 2015 frame = get_thread_current_frame (tp);
52834460
MM
2016 frame_id = get_frame_id (frame);
2017
2018 /* Replace stepping related frames where necessary. */
2019 if (upd_step_frame_id)
2020 tp->control.step_frame_id = frame_id;
2021 if (upd_step_stack_frame_id)
2022 tp->control.step_stack_frame_id = frame_id;
2023 }
492d29ea 2024 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2025 {
2026 xfree (btinfo->replay);
2027 btinfo->replay = NULL;
2028
2029 registers_changed_ptid (tp->ptid);
2030
2031 throw_exception (except);
2032 }
492d29ea 2033 END_CATCH
52834460
MM
2034
2035 return replay;
2036}
2037
2038/* Stop replaying a thread. */
2039
2040static void
2041record_btrace_stop_replaying (struct thread_info *tp)
2042{
2043 struct btrace_thread_info *btinfo;
2044
2045 btinfo = &tp->btrace;
2046
2047 xfree (btinfo->replay);
2048 btinfo->replay = NULL;
2049
2050 /* Make sure we're not leaving any stale registers. */
2051 registers_changed_ptid (tp->ptid);
2052}
2053
e3cfc1c7
MM
2054/* Stop replaying TP if it is at the end of its execution history. */
2055
2056static void
2057record_btrace_stop_replaying_at_end (struct thread_info *tp)
2058{
2059 struct btrace_insn_iterator *replay, end;
2060 struct btrace_thread_info *btinfo;
2061
2062 btinfo = &tp->btrace;
2063 replay = btinfo->replay;
2064
2065 if (replay == NULL)
2066 return;
2067
2068 btrace_insn_end (&end, btinfo);
2069
2070 if (btrace_insn_cmp (replay, &end) == 0)
2071 record_btrace_stop_replaying (tp);
2072}
2073
b2f4cfde
MM
2074/* The to_resume method of target record-btrace. */
2075
2076static void
2077record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2078 enum gdb_signal signal)
2079{
0ca912df 2080 struct thread_info *tp;
d2939ba2 2081 enum btrace_thread_flag flag, cflag;
52834460 2082
987e68b1
MM
2083 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2084 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2085 step ? "step" : "cont");
52834460 2086
0ca912df
MM
2087 /* Store the execution direction of the last resume.
2088
2089 If there is more than one to_resume call, we have to rely on infrun
2090 to not change the execution direction in-between. */
70ad5bff
MM
2091 record_btrace_resume_exec_dir = execution_direction;
2092
0ca912df 2093 /* As long as we're not replaying, just forward the request.
52834460 2094
0ca912df
MM
2095 For non-stop targets this means that no thread is replaying. In order to
2096 make progress, we may need to explicitly move replaying threads to the end
2097 of their execution history. */
a52eab48
MM
2098 if ((execution_direction != EXEC_REVERSE)
2099 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2100 {
e75fdfca 2101 ops = ops->beneath;
04c4fe8c
MM
2102 ops->to_resume (ops, ptid, step, signal);
2103 return;
b2f4cfde
MM
2104 }
2105
52834460 2106 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2107 if (execution_direction == EXEC_REVERSE)
2108 {
2109 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2110 cflag = BTHR_RCONT;
2111 }
52834460 2112 else
d2939ba2
MM
2113 {
2114 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2115 cflag = BTHR_CONT;
2116 }
52834460 2117
52834460 2118 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2119 record_btrace_wait below.
2120
2121 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2122 if (!target_is_non_stop_p ())
2123 {
2124 gdb_assert (ptid_match (inferior_ptid, ptid));
2125
2126 ALL_NON_EXITED_THREADS (tp)
2127 if (ptid_match (tp->ptid, ptid))
2128 {
2129 if (ptid_match (tp->ptid, inferior_ptid))
2130 record_btrace_resume_thread (tp, flag);
2131 else
2132 record_btrace_resume_thread (tp, cflag);
2133 }
2134 }
2135 else
2136 {
2137 ALL_NON_EXITED_THREADS (tp)
2138 if (ptid_match (tp->ptid, ptid))
2139 record_btrace_resume_thread (tp, flag);
2140 }
70ad5bff
MM
2141
2142 /* Async support. */
2143 if (target_can_async_p ())
2144 {
6a3753b3 2145 target_async (1);
70ad5bff
MM
2146 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2147 }
52834460
MM
2148}
2149
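/* A small sketch of the flag selection above: the requested thread gets a
   (reverse-) step or continue flag, every other resumed thread gets the
   matching continue flag.  The SKM_* enumerators are illustrative only.  */

#include <stdio.h>

enum sketch_move_flag { SKM_STEP, SKM_RSTEP, SKM_CONT, SKM_RCONT };

static void
sketch_select_flags (int step, int reverse,
		     enum sketch_move_flag *flag, enum sketch_move_flag *cflag)
{
  if (reverse)
    {
      *flag = step ? SKM_RSTEP : SKM_RCONT;
      *cflag = SKM_RCONT;
    }
  else
    {
      *flag = step ? SKM_STEP : SKM_CONT;
      *cflag = SKM_CONT;
    }
}

int
main (void)
{
  enum sketch_move_flag flag, cflag;

  sketch_select_flags (1 /* step */, 1 /* reverse */, &flag, &cflag);
  printf ("flag=%d cflag=%d\n", (int) flag, (int) cflag);
  return 0;
}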
987e68b1
MM
2150/* Cancel resuming TP. */
2151
2152static void
2153record_btrace_cancel_resume (struct thread_info *tp)
2154{
2155 enum btrace_thread_flag flags;
2156
2157 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2158 if (flags == 0)
2159 return;
2160
43792cf0
PA
2161 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2162 print_thread_id (tp),
987e68b1
MM
2163 target_pid_to_str (tp->ptid), flags,
2164 btrace_thread_flag_to_str (flags));
2165
2166 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2167 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2168}
2169
2170/* Return a target_waitstatus indicating that we ran out of history. */
2171
2172static struct target_waitstatus
2173btrace_step_no_history (void)
2174{
2175 struct target_waitstatus status;
2176
2177 status.kind = TARGET_WAITKIND_NO_HISTORY;
2178
2179 return status;
2180}
2181
2182/* Return a target_waitstatus indicating that a step finished. */
2183
2184static struct target_waitstatus
2185btrace_step_stopped (void)
2186{
2187 struct target_waitstatus status;
2188
2189 status.kind = TARGET_WAITKIND_STOPPED;
2190 status.value.sig = GDB_SIGNAL_TRAP;
2191
2192 return status;
2193}
2194
6e4879f0
MM
2195/* Return a target_waitstatus indicating that a thread was stopped as
2196 requested. */
2197
2198static struct target_waitstatus
2199btrace_step_stopped_on_request (void)
2200{
2201 struct target_waitstatus status;
2202
2203 status.kind = TARGET_WAITKIND_STOPPED;
2204 status.value.sig = GDB_SIGNAL_0;
2205
2206 return status;
2207}
2208
d825d248
MM
2209/* Return a target_waitstatus indicating a spurious stop. */
2210
2211static struct target_waitstatus
2212btrace_step_spurious (void)
2213{
2214 struct target_waitstatus status;
2215
2216 status.kind = TARGET_WAITKIND_SPURIOUS;
2217
2218 return status;
2219}
2220
e3cfc1c7
MM
2221/* Return a target_waitstatus indicating that the thread was not resumed. */
2222
2223static struct target_waitstatus
2224btrace_step_no_resumed (void)
2225{
2226 struct target_waitstatus status;
2227
2228 status.kind = TARGET_WAITKIND_NO_RESUMED;
2229
2230 return status;
2231}
2232
2233/* Return a target_waitstatus indicating that we should wait again. */
2234
2235static struct target_waitstatus
2236btrace_step_again (void)
2237{
2238 struct target_waitstatus status;
2239
2240 status.kind = TARGET_WAITKIND_IGNORE;
2241
2242 return status;
2243}
2244
52834460
MM
2245/* Clear the record histories. */
2246
2247static void
2248record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2249{
2250 xfree (btinfo->insn_history);
2251 xfree (btinfo->call_history);
2252
2253 btinfo->insn_history = NULL;
2254 btinfo->call_history = NULL;
2255}
2256
3c615f99
MM
2257/* Check whether TP's current replay position is at a breakpoint. */
2258
2259static int
2260record_btrace_replay_at_breakpoint (struct thread_info *tp)
2261{
2262 struct btrace_insn_iterator *replay;
2263 struct btrace_thread_info *btinfo;
2264 const struct btrace_insn *insn;
2265 struct inferior *inf;
2266
2267 btinfo = &tp->btrace;
2268 replay = btinfo->replay;
2269
2270 if (replay == NULL)
2271 return 0;
2272
2273 insn = btrace_insn_get (replay);
2274 if (insn == NULL)
2275 return 0;
2276
2277 inf = find_inferior_ptid (tp->ptid);
2278 if (inf == NULL)
2279 return 0;
2280
2281 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2282 &btinfo->stop_reason);
2283}
2284
d825d248 2285/* Step one instruction in forward direction. */
52834460
MM
2286
2287static struct target_waitstatus
d825d248 2288record_btrace_single_step_forward (struct thread_info *tp)
52834460
MM
2289{
2290 struct btrace_insn_iterator *replay, end;
2291 struct btrace_thread_info *btinfo;
52834460 2292
d825d248
MM
2293 btinfo = &tp->btrace;
2294 replay = btinfo->replay;
2295
2296 /* We're done if we're not replaying. */
2297 if (replay == NULL)
2298 return btrace_step_no_history ();
2299
011c71b6
MM
2300 /* Check if we're stepping a breakpoint. */
2301 if (record_btrace_replay_at_breakpoint (tp))
2302 return btrace_step_stopped ();
2303
d825d248
MM
2304 /* Skip gaps during replay. */
2305 do
2306 {
2307 unsigned int steps;
2308
e3cfc1c7
MM
2309 /* We will bail out here if we continue stepping after reaching the end
2310 of the execution history. */
d825d248
MM
2311 steps = btrace_insn_next (replay, 1);
2312 if (steps == 0)
e3cfc1c7 2313 return btrace_step_no_history ();
d825d248
MM
2314 }
2315 while (btrace_insn_get (replay) == NULL);
2316
2317 /* Determine the end of the instruction trace. */
2318 btrace_insn_end (&end, btinfo);
2319
e3cfc1c7
MM
2320 /* The execution trace contains (and ends with) the current instruction.
2321 This instruction has not been executed, yet, so the trace really ends
2322 one instruction earlier. */
d825d248 2323 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2324 return btrace_step_no_history ();
d825d248
MM
2325
2326 return btrace_step_spurious ();
2327}
2328
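/* A standalone sketch of the gap handling above: advance the replay
   iterator, keep going while the new position holds no instruction, and
   report "no history" once the iterator cannot advance.  The array-based
   iterator below is an assumption for illustration, not GDB's btrace
   iterator.  */

#include <stddef.h>
#include <stdio.h>

struct sketch_iter
{
  const int *const *insns;	/* recorded instructions; NULL marks a gap */
  size_t count;
  size_t pos;
};

/* Advance by one position; return the number of steps taken (0 or 1).  */
static unsigned int
sketch_insn_next (struct sketch_iter *it)
{
  if (it->pos + 1 >= it->count)
    return 0;

  it->pos += 1;
  return 1;
}

/* Step one instruction forward, skipping gaps; return 0 on success and -1
   once we run out of history.  */
static int
sketch_step_forward (struct sketch_iter *it)
{
  do
    {
      if (sketch_insn_next (it) == 0)
	return -1;
    }
  while (it->insns[it->pos] == NULL);

  return 0;
}

int
main (void)
{
  static const int i0 = 10, i2 = 30;
  static const int *const trace[] = { &i0, NULL, &i2 };
  struct sketch_iter it = { trace, 3, 0 };

  while (sketch_step_forward (&it) == 0)
    printf ("stepped to insn %d\n", *it.insns[it.pos]);

  printf ("no more history\n");
  return 0;
}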
2329/* Step one instruction in backward direction. */
2330
2331static struct target_waitstatus
2332record_btrace_single_step_backward (struct thread_info *tp)
2333{
2334 struct btrace_insn_iterator *replay;
2335 struct btrace_thread_info *btinfo;
e59fa00f 2336
52834460
MM
2337 btinfo = &tp->btrace;
2338 replay = btinfo->replay;
2339
d825d248
MM
2340 /* Start replaying if we're not already doing so. */
2341 if (replay == NULL)
2342 replay = record_btrace_start_replaying (tp);
2343
2344 /* If we can't step any further, we reached the end of the history.
2345 Skip gaps during replay. */
2346 do
2347 {
2348 unsigned int steps;
2349
2350 steps = btrace_insn_prev (replay, 1);
2351 if (steps == 0)
2352 return btrace_step_no_history ();
2353 }
2354 while (btrace_insn_get (replay) == NULL);
2355
011c71b6
MM
2356 /* Check if we're stepping a breakpoint.
2357
2358 For reverse-stepping, this check is after the step. There is logic in
2359 infrun.c that handles reverse-stepping separately. See, for example,
2360 proceed and adjust_pc_after_break.
2361
2362 This code assumes that for reverse-stepping, PC points to the last
2363 de-executed instruction, whereas for forward-stepping PC points to the
2364 next to-be-executed instruction. */
2365 if (record_btrace_replay_at_breakpoint (tp))
2366 return btrace_step_stopped ();
2367
d825d248
MM
2368 return btrace_step_spurious ();
2369}
2370
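/* A sketch of the ordering difference described above, under simplified
   assumptions: forward stepping checks the breakpoint at the current PC
   before moving, reverse stepping moves first and then checks the new PC,
   which now names the last de-executed instruction.  */

#include <stdio.h>

static int
sketch_breakpoint_at (int pc)
{
  return pc == 30;
}

/* Step once through PCS; return the PC we stop at, or -1 if no breakpoint
   was hit.  */
static int
sketch_step (const int *pcs, int *pos, int forward)
{
  if (forward)
    {
      if (sketch_breakpoint_at (pcs[*pos]))
	return pcs[*pos];	/* Stop before executing it.  */
      *pos += 1;
    }
  else
    {
      *pos -= 1;
      if (sketch_breakpoint_at (pcs[*pos]))
	return pcs[*pos];	/* Stop after de-executing it.  */
    }

  return -1;
}

int
main (void)
{
  const int pcs[] = { 10, 20, 30, 40 };
  int pos = 3;

  /* Reverse-step from 40: move to 30 first, then notice the breakpoint.  */
  printf ("reverse stop at %d\n", sketch_step (pcs, &pos, 0));
  return 0;
}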
2371/* Step a single thread. */
2372
2373static struct target_waitstatus
2374record_btrace_step_thread (struct thread_info *tp)
2375{
2376 struct btrace_thread_info *btinfo;
2377 struct target_waitstatus status;
2378 enum btrace_thread_flag flags;
2379
2380 btinfo = &tp->btrace;
2381
6e4879f0
MM
2382 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2383 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2384
43792cf0 2385 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2386 target_pid_to_str (tp->ptid), flags,
2387 btrace_thread_flag_to_str (flags));
52834460 2388
6e4879f0
MM
2389 /* We can't step without an execution history. */
2390 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2391 return btrace_step_no_history ();
2392
52834460
MM
2393 switch (flags)
2394 {
2395 default:
2396 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2397
6e4879f0
MM
2398 case BTHR_STOP:
2399 return btrace_step_stopped_on_request ();
2400
52834460 2401 case BTHR_STEP:
d825d248
MM
2402 status = record_btrace_single_step_forward (tp);
2403 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2404 break;
52834460
MM
2405
2406 return btrace_step_stopped ();
2407
2408 case BTHR_RSTEP:
d825d248
MM
2409 status = record_btrace_single_step_backward (tp);
2410 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2411 break;
52834460
MM
2412
2413 return btrace_step_stopped ();
2414
2415 case BTHR_CONT:
e3cfc1c7
MM
2416 status = record_btrace_single_step_forward (tp);
2417 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2418 break;
52834460 2419
e3cfc1c7
MM
2420 btinfo->flags |= flags;
2421 return btrace_step_again ();
52834460
MM
2422
2423 case BTHR_RCONT:
e3cfc1c7
MM
2424 status = record_btrace_single_step_backward (tp);
2425 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2426 break;
52834460 2427
e3cfc1c7
MM
2428 btinfo->flags |= flags;
2429 return btrace_step_again ();
2430 }
d825d248 2431
e3cfc1c7
MM
2432 /* We keep threads moving at the end of their execution history. The to_wait
 2433   method will stop the thread for which the event is reported. */
2434 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2435 btinfo->flags |= flags;
52834460 2436
e3cfc1c7 2437 return status;
b2f4cfde
MM
2438}
2439
e3cfc1c7
MM
2440/* A vector of threads. */
2441
2442typedef struct thread_info * tp_t;
2443DEF_VEC_P (tp_t);
2444
a6b5be76
MM
2445/* Announce further events if necessary. */
2446
2447static void
2448record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2449 const VEC (tp_t) *no_history)
2450{
2451 int more_moving, more_no_history;
2452
2453 more_moving = !VEC_empty (tp_t, moving);
2454 more_no_history = !VEC_empty (tp_t, no_history);
2455
2456 if (!more_moving && !more_no_history)
2457 return;
2458
2459 if (more_moving)
2460 DEBUG ("movers pending");
2461
2462 if (more_no_history)
2463 DEBUG ("no-history pending");
2464
2465 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2466}
2467
b2f4cfde
MM
2468/* The to_wait method of target record-btrace. */
2469
2470static ptid_t
2471record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2472 struct target_waitstatus *status, int options)
2473{
e3cfc1c7
MM
2474 VEC (tp_t) *moving, *no_history;
2475 struct thread_info *tp, *eventing;
2476 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
52834460
MM
2477
2478 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2479
b2f4cfde 2480 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2481 if ((execution_direction != EXEC_REVERSE)
2482 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2483 {
e75fdfca
TT
2484 ops = ops->beneath;
2485 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2486 }
2487
e3cfc1c7
MM
2488 moving = NULL;
2489 no_history = NULL;
2490
2491 make_cleanup (VEC_cleanup (tp_t), &moving);
2492 make_cleanup (VEC_cleanup (tp_t), &no_history);
2493
2494 /* Keep a work list of moving threads. */
2495 ALL_NON_EXITED_THREADS (tp)
2496 if (ptid_match (tp->ptid, ptid)
2497 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2498 VEC_safe_push (tp_t, moving, tp);
2499
2500 if (VEC_empty (tp_t, moving))
52834460 2501 {
e3cfc1c7 2502 *status = btrace_step_no_resumed ();
52834460 2503
e3cfc1c7
MM
2504 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2505 target_waitstatus_to_string (status));
2506
2507 do_cleanups (cleanups);
2508 return null_ptid;
52834460
MM
2509 }
2510
e3cfc1c7
MM
2511 /* Step moving threads one by one, one step each, until either one thread
2512 reports an event or we run out of threads to step.
2513
2514 When stepping more than one thread, chances are that some threads reach
2515 the end of their execution history earlier than others. If we reported
2516 this immediately, all-stop on top of non-stop would stop all threads and
2517 resume the same threads next time. And we would report the same thread
2518 having reached the end of its execution history again.
2519
2520 In the worst case, this would starve the other threads. But even if other
2521 threads would be allowed to make progress, this would result in far too
2522 many intermediate stops.
2523
2524 We therefore delay the reporting of "no execution history" until we have
2525 nothing else to report. By this time, all threads should have moved to
2526 either the beginning or the end of their execution history. There will
2527 be a single user-visible stop. */
2528 eventing = NULL;
2529 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2530 {
2531 unsigned int ix;
2532
2533 ix = 0;
2534 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2535 {
2536 *status = record_btrace_step_thread (tp);
2537
2538 switch (status->kind)
2539 {
2540 case TARGET_WAITKIND_IGNORE:
2541 ix++;
2542 break;
2543
2544 case TARGET_WAITKIND_NO_HISTORY:
2545 VEC_safe_push (tp_t, no_history,
2546 VEC_ordered_remove (tp_t, moving, ix));
2547 break;
2548
2549 default:
2550 eventing = VEC_unordered_remove (tp_t, moving, ix);
2551 break;
2552 }
2553 }
2554 }
2555
2556 if (eventing == NULL)
2557 {
2558 /* We started with at least one moving thread. This thread must have
2559 either stopped or reached the end of its execution history.
2560
2561 In the former case, EVENTING must not be NULL.
2562 In the latter case, NO_HISTORY must not be empty. */
2563 gdb_assert (!VEC_empty (tp_t, no_history));
2564
2565 /* We kept threads moving at the end of their execution history. Stop
2566 EVENTING now that we are going to report its stop. */
2567 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2568 eventing->btrace.flags &= ~BTHR_MOVE;
2569
2570 *status = btrace_step_no_history ();
2571 }
2572
2573 gdb_assert (eventing != NULL);
2574
2575 /* We kept threads replaying at the end of their execution history. Stop
2576 replaying EVENTING now that we are going to report its stop. */
2577 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2578
2579 /* Stop all other threads. */
5953356c 2580 if (!target_is_non_stop_p ())
e3cfc1c7
MM
2581 ALL_NON_EXITED_THREADS (tp)
2582 record_btrace_cancel_resume (tp);
52834460 2583
a6b5be76
MM
2584 /* In async mode, we need to announce further events. */
2585 if (target_is_async_p ())
2586 record_btrace_maybe_mark_async_event (moving, no_history);
2587
52834460 2588 /* Start record histories anew from the current position. */
e3cfc1c7 2589 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2590
2591 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2592 registers_changed_ptid (eventing->ptid);
2593
43792cf0
PA
2594 DEBUG ("wait ended by thread %s (%s): %s",
2595 print_thread_id (eventing),
e3cfc1c7
MM
2596 target_pid_to_str (eventing->ptid),
2597 target_waitstatus_to_string (status));
52834460 2598
e3cfc1c7
MM
2599 do_cleanups (cleanups);
2600 return eventing->ptid;
52834460
MM
2601}
2602
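/* A standalone sketch of the wait loop above: step every moving thread one
   instruction at a time, round-robin, until one of them reports a real
   event; threads that merely run out of history are set aside and reported
   only if nothing else happens.  All types here are simplified stand-ins
   for illustration.  */

#include <stdio.h>

enum sketch_waitkind { SKW_IGNORE, SKW_NO_HISTORY, SKW_STOPPED };

struct sketch_thread
{
  int id;
  int steps_until_stop;		/* steps before this thread reports a stop */
  int history_left;		/* steps of recorded history remaining */
};

static enum sketch_waitkind
sketch_step_thread (struct sketch_thread *tp)
{
  if (tp->history_left == 0)
    return SKW_NO_HISTORY;

  tp->history_left -= 1;
  if (--tp->steps_until_stop == 0)
    return SKW_STOPPED;

  return SKW_IGNORE;
}

int
main (void)
{
  struct sketch_thread threads[] = { { 1, 5, 2 }, { 2, 3, 10 } };
  int moving[] = { 0, 1 };
  int n_moving = 2;
  int eventing = -1;

  while (eventing < 0 && n_moving > 0)
    {
      int ix = 0;

      while (eventing < 0 && ix < n_moving)
	switch (sketch_step_thread (&threads[moving[ix]]))
	  {
	  case SKW_IGNORE:
	    ix += 1;
	    break;

	  case SKW_NO_HISTORY:
	    /* Defer the report; drop the thread from the work list.  */
	    moving[ix] = moving[--n_moving];
	    break;

	  case SKW_STOPPED:
	    eventing = moving[ix];
	    break;
	  }
    }

  if (eventing >= 0)
    printf ("thread %d reports its stop\n", threads[eventing].id);
  else
    printf ("all threads ran out of history\n");

  return 0;
}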
6e4879f0
MM
2603/* The to_stop method of target record-btrace. */
2604
2605static void
2606record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2607{
2608 DEBUG ("stop %s", target_pid_to_str (ptid));
2609
2610 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2611 if ((execution_direction != EXEC_REVERSE)
2612 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2613 {
2614 ops = ops->beneath;
2615 ops->to_stop (ops, ptid);
2616 }
2617 else
2618 {
2619 struct thread_info *tp;
2620
2621 ALL_NON_EXITED_THREADS (tp)
2622 if (ptid_match (tp->ptid, ptid))
2623 {
2624 tp->btrace.flags &= ~BTHR_MOVE;
2625 tp->btrace.flags |= BTHR_STOP;
2626 }
2627 }
2628 }
2629
52834460
MM
2630/* The to_can_execute_reverse method of target record-btrace. */
2631
2632static int
19db3e69 2633record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2634{
2635 return 1;
2636}
2637
9e8915c6 2638/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2639
9e8915c6
PA
2640static int
2641record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2642{
a52eab48 2643 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2644 {
2645 struct thread_info *tp = inferior_thread ();
2646
2647 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2648 }
2649
2650 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2651}
2652
2653/* The to_supports_stopped_by_sw_breakpoint method of target
2654 record-btrace. */
2655
2656static int
2657record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2658{
a52eab48 2659 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2660 return 1;
2661
2662 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2663}
2664
 2665/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2666
2667static int
2668record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2669{
a52eab48 2670 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2671 {
2672 struct thread_info *tp = inferior_thread ();
2673
2674 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2675 }
2676
2677 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2678}
2679
2680/* The to_supports_stopped_by_hw_breakpoint method of target
2681 record-btrace. */
2682
2683static int
2684record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2685{
a52eab48 2686 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2687 return 1;
52834460 2688
9e8915c6 2689 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2690}
2691
e8032dde 2692/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2693
2694static void
e8032dde 2695record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2696{
e8032dde 2697 /* We don't add or remove threads during replay. */
a52eab48 2698 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2699 return;
2700
2701 /* Forward the request. */
e75fdfca 2702 ops = ops->beneath;
e8032dde 2703 ops->to_update_thread_list (ops);
e2887aa3
MM
2704}
2705
2706/* The to_thread_alive method of target record-btrace. */
2707
2708static int
2709record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2710{
2711 /* We don't add or remove threads during replay. */
a52eab48 2712 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2713 return find_thread_ptid (ptid) != NULL;
2714
2715 /* Forward the request. */
e75fdfca
TT
2716 ops = ops->beneath;
2717 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2718}
2719
066ce621
MM
2720/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2721 is stopped. */
2722
2723static void
2724record_btrace_set_replay (struct thread_info *tp,
2725 const struct btrace_insn_iterator *it)
2726{
2727 struct btrace_thread_info *btinfo;
2728
2729 btinfo = &tp->btrace;
2730
2731 if (it == NULL || it->function == NULL)
52834460 2732 record_btrace_stop_replaying (tp);
066ce621
MM
2733 else
2734 {
2735 if (btinfo->replay == NULL)
52834460 2736 record_btrace_start_replaying (tp);
066ce621
MM
2737 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2738 return;
2739
2740 *btinfo->replay = *it;
52834460 2741 registers_changed_ptid (tp->ptid);
066ce621
MM
2742 }
2743
52834460
MM
2744 /* Start anew from the new replay position. */
2745 record_btrace_clear_histories (btinfo);
485668e5
MM
2746
2747 stop_pc = regcache_read_pc (get_current_regcache ());
2748 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2749}
2750
2751/* The to_goto_record_begin method of target record-btrace. */
2752
2753static void
08475817 2754record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2755{
2756 struct thread_info *tp;
2757 struct btrace_insn_iterator begin;
2758
2759 tp = require_btrace_thread ();
2760
2761 btrace_insn_begin (&begin, &tp->btrace);
2762 record_btrace_set_replay (tp, &begin);
066ce621
MM
2763}
2764
2765/* The to_goto_record_end method of target record-btrace. */
2766
2767static void
307a1b91 2768record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2769{
2770 struct thread_info *tp;
2771
2772 tp = require_btrace_thread ();
2773
2774 record_btrace_set_replay (tp, NULL);
066ce621
MM
2775}
2776
2777/* The to_goto_record method of target record-btrace. */
2778
2779static void
606183ac 2780record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2781{
2782 struct thread_info *tp;
2783 struct btrace_insn_iterator it;
2784 unsigned int number;
2785 int found;
2786
2787 number = insn;
2788
2789 /* Check for wrap-arounds. */
2790 if (number != insn)
2791 error (_("Instruction number out of range."));
2792
2793 tp = require_btrace_thread ();
2794
2795 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2796 if (found == 0)
2797 error (_("No such instruction."));
2798
2799 record_btrace_set_replay (tp, &it);
066ce621
MM
2800}
2801
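/* A short sketch of the wrap-around check above: assign the 64-bit
   instruction number to the narrower unsigned int and compare back; if the
   round trip changes the value, the number does not fit.  */

#include <stdint.h>
#include <stdio.h>

/* Return 1 if VALUE survives the round trip through unsigned int.  */
static int
sketch_fits_uint (uint64_t value)
{
  unsigned int narrow = (unsigned int) value;

  return (uint64_t) narrow == value;
}

int
main (void)
{
  /* Prints 1, then 0 on the usual ABIs where unsigned int is 32 bits.  */
  printf ("%d\n", sketch_fits_uint (42));
  printf ("%d\n", sketch_fits_uint (UINT64_C (1) << 40));
  return 0;
}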
797094dd
MM
2802/* The to_record_stop_replaying method of target record-btrace. */
2803
2804static void
2805record_btrace_stop_replaying_all (struct target_ops *self)
2806{
2807 struct thread_info *tp;
2808
2809 ALL_NON_EXITED_THREADS (tp)
2810 record_btrace_stop_replaying (tp);
2811}
2812
70ad5bff
MM
2813/* The to_execution_direction target method. */
2814
2815static enum exec_direction_kind
2816record_btrace_execution_direction (struct target_ops *self)
2817{
2818 return record_btrace_resume_exec_dir;
2819}
2820
aef92902
MM
2821/* The to_prepare_to_generate_core target method. */
2822
2823static void
2824record_btrace_prepare_to_generate_core (struct target_ops *self)
2825{
2826 record_btrace_generating_corefile = 1;
2827}
2828
2829/* The to_done_generating_core target method. */
2830
2831static void
2832record_btrace_done_generating_core (struct target_ops *self)
2833{
2834 record_btrace_generating_corefile = 0;
2835}
2836
afedecd3
MM
2837/* Initialize the record-btrace target ops. */
2838
2839static void
2840init_record_btrace_ops (void)
2841{
2842 struct target_ops *ops;
2843
2844 ops = &record_btrace_ops;
2845 ops->to_shortname = "record-btrace";
2846 ops->to_longname = "Branch tracing target";
2847 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2848 ops->to_open = record_btrace_open;
2849 ops->to_close = record_btrace_close;
b7d2e916 2850 ops->to_async = record_btrace_async;
afedecd3 2851 ops->to_detach = record_detach;
c0272db5 2852 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2853 ops->to_mourn_inferior = record_mourn_inferior;
2854 ops->to_kill = record_kill;
afedecd3
MM
2855 ops->to_stop_recording = record_btrace_stop_recording;
2856 ops->to_info_record = record_btrace_info;
2857 ops->to_insn_history = record_btrace_insn_history;
2858 ops->to_insn_history_from = record_btrace_insn_history_from;
2859 ops->to_insn_history_range = record_btrace_insn_history_range;
2860 ops->to_call_history = record_btrace_call_history;
2861 ops->to_call_history_from = record_btrace_call_history_from;
2862 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 2863 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2864 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2865 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2866 ops->to_xfer_partial = record_btrace_xfer_partial;
2867 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2868 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2869 ops->to_fetch_registers = record_btrace_fetch_registers;
2870 ops->to_store_registers = record_btrace_store_registers;
2871 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2872 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2873 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde
MM
2874 ops->to_resume = record_btrace_resume;
2875 ops->to_wait = record_btrace_wait;
6e4879f0 2876 ops->to_stop = record_btrace_stop;
e8032dde 2877 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2878 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2879 ops->to_goto_record_begin = record_btrace_goto_begin;
2880 ops->to_goto_record_end = record_btrace_goto_end;
2881 ops->to_goto_record = record_btrace_goto;
52834460 2882 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2883 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2884 ops->to_supports_stopped_by_sw_breakpoint
2885 = record_btrace_supports_stopped_by_sw_breakpoint;
2886 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2887 ops->to_supports_stopped_by_hw_breakpoint
2888 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2889 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2890 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2891 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2892 ops->to_stratum = record_stratum;
2893 ops->to_magic = OPS_MAGIC;
2894}
2895
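/* A sketch of the wiring above under simplified assumptions: a struct of
   function pointers is filled in once, and each method either handles the
   request itself or forwards it to the target beneath.  The two-member
   struct below is an illustration, not GDB's target vector.  */

#include <stdio.h>

struct sketch_ops
{
  const char *shortname;
  void (*resume) (struct sketch_ops *self);
  struct sketch_ops *beneath;
};

static void
sketch_native_resume (struct sketch_ops *self)
{
  printf ("%s: resuming for real\n", self->shortname);
}

static void
sketch_record_resume (struct sketch_ops *self)
{
  printf ("%s: recording, then forwarding\n", self->shortname);
  self->beneath->resume (self->beneath);
}

int
main (void)
{
  struct sketch_ops native = { "native-sketch", sketch_native_resume, NULL };
  struct sketch_ops record = { "record-sketch", sketch_record_resume, &native };

  record.resume (&record);
  return 0;
}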
f4abbc16
MM
2896/* Start recording in BTS format. */
2897
2898static void
2899cmd_record_btrace_bts_start (char *args, int from_tty)
2900{
f4abbc16
MM
2901 if (args != NULL && *args != 0)
2902 error (_("Invalid argument."));
2903
2904 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2905
492d29ea
PA
2906 TRY
2907 {
2908 execute_command ("target record-btrace", from_tty);
2909 }
2910 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2911 {
2912 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2913 throw_exception (exception);
2914 }
492d29ea 2915 END_CATCH
f4abbc16
MM
2916}
2917
bc504a31 2918/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2919
2920static void
b20a6524 2921cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2922{
2923 if (args != NULL && *args != 0)
2924 error (_("Invalid argument."));
2925
b20a6524 2926 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2927
492d29ea
PA
2928 TRY
2929 {
2930 execute_command ("target record-btrace", from_tty);
2931 }
2932 CATCH (exception, RETURN_MASK_ALL)
2933 {
2934 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2935 throw_exception (exception);
2936 }
2937 END_CATCH
afedecd3
MM
2938}
2939
b20a6524
MM
2940/* Alias for "target record". */
2941
2942static void
2943cmd_record_btrace_start (char *args, int from_tty)
2944{
2945 if (args != NULL && *args != 0)
2946 error (_("Invalid argument."));
2947
2948 record_btrace_conf.format = BTRACE_FORMAT_PT;
2949
2950 TRY
2951 {
2952 execute_command ("target record-btrace", from_tty);
2953 }
2954 CATCH (exception, RETURN_MASK_ALL)
2955 {
2956 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2957
2958 TRY
2959 {
2960 execute_command ("target record-btrace", from_tty);
2961 }
2962 CATCH (exception, RETURN_MASK_ALL)
2963 {
2964 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2965 throw_exception (exception);
2966 }
2967 END_CATCH
2968 }
2969 END_CATCH
2970}
2971
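/* A sketch of the fallback above, with plain error codes standing in for
   GDB's TRY/CATCH: try the preferred format first, fall back to the second
   one, and reset the configuration to "none" if both attempts fail.  All
   names are made up for illustration.  */

#include <stdio.h>

enum sketch_format { SKF_NONE, SKF_BTS, SKF_PT };

static enum sketch_format sketch_conf = SKF_NONE;

/* Pretend to open the recording target; in this example only BTS works.  */
static int
sketch_target_open (void)
{
  return sketch_conf == SKF_BTS ? 0 : -1;
}

static int
sketch_record_start (void)
{
  sketch_conf = SKF_PT;
  if (sketch_target_open () == 0)
    return 0;

  sketch_conf = SKF_BTS;
  if (sketch_target_open () == 0)
    return 0;

  sketch_conf = SKF_NONE;
  return -1;
}

int
main (void)
{
  if (sketch_record_start () == 0)
    printf ("recording started with format %d\n", (int) sketch_conf);
  else
    printf ("recording failed\n");
  return 0;
}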
67b5c0c1
MM
2972/* The "set record btrace" command. */
2973
2974static void
2975cmd_set_record_btrace (char *args, int from_tty)
2976{
2977 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2978}
2979
2980/* The "show record btrace" command. */
2981
2982static void
2983cmd_show_record_btrace (char *args, int from_tty)
2984{
2985 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2986}
2987
2988/* The "show record btrace replay-memory-access" command. */
2989
2990static void
2991cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2992 struct cmd_list_element *c, const char *value)
2993{
2994 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2995 replay_memory_access);
2996}
2997
d33501a5
MM
2998/* The "set record btrace bts" command. */
2999
3000static void
3001cmd_set_record_btrace_bts (char *args, int from_tty)
3002{
3003 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3004 "by an appropriate subcommand.\n"));
d33501a5
MM
3005 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3006 all_commands, gdb_stdout);
3007}
3008
3009/* The "show record btrace bts" command. */
3010
3011static void
3012cmd_show_record_btrace_bts (char *args, int from_tty)
3013{
3014 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3015}
3016
b20a6524
MM
3017/* The "set record btrace pt" command. */
3018
3019static void
3020cmd_set_record_btrace_pt (char *args, int from_tty)
3021{
3022 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3023 "by an appropriate subcommand.\n"));
3024 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3025 all_commands, gdb_stdout);
3026}
3027
3028/* The "show record btrace pt" command. */
3029
3030static void
3031cmd_show_record_btrace_pt (char *args, int from_tty)
3032{
3033 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3034}
3035
3036/* The "record bts buffer-size" show value function. */
3037
3038static void
3039show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3040 struct cmd_list_element *c,
3041 const char *value)
3042{
3043 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3044 value);
3045}
3046
3047/* The "record pt buffer-size" show value function. */
3048
3049static void
3050show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3051 struct cmd_list_element *c,
3052 const char *value)
3053{
3054 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3055 value);
3056}
3057
afedecd3
MM
3058void _initialize_record_btrace (void);
3059
3060/* Initialize btrace commands. */
3061
3062void
3063_initialize_record_btrace (void)
3064{
f4abbc16
MM
3065 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3066 _("Start branch trace recording."), &record_btrace_cmdlist,
3067 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3068 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3069
f4abbc16
MM
3070 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3071 _("\
3072Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3073The processor stores a from/to record for each branch into a cyclic buffer.\n\
3074This format may not be available on all processors."),
3075 &record_btrace_cmdlist);
3076 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3077
b20a6524
MM
3078 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3079 _("\
bc504a31 3080Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3081This format may not be available on all processors."),
3082 &record_btrace_cmdlist);
3083 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3084
67b5c0c1
MM
3085 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3086 _("Set record options"), &set_record_btrace_cmdlist,
3087 "set record btrace ", 0, &set_record_cmdlist);
3088
3089 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3090 _("Show record options"), &show_record_btrace_cmdlist,
3091 "show record btrace ", 0, &show_record_cmdlist);
3092
3093 add_setshow_enum_cmd ("replay-memory-access", no_class,
3094 replay_memory_access_types, &replay_memory_access, _("\
3095Set what memory accesses are allowed during replay."), _("\
3096Show what memory accesses are allowed during replay."),
3097 _("Default is READ-ONLY.\n\n\
3098The btrace record target does not trace data.\n\
3099The memory therefore corresponds to the live target and not \
3100to the current replay position.\n\n\
3101When READ-ONLY, allow accesses to read-only memory during replay.\n\
3102When READ-WRITE, allow accesses to read-only and read-write memory during \
3103replay."),
3104 NULL, cmd_show_replay_memory_access,
3105 &set_record_btrace_cmdlist,
3106 &show_record_btrace_cmdlist);
3107
d33501a5
MM
3108 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3109 _("Set record btrace bts options"),
3110 &set_record_btrace_bts_cmdlist,
3111 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3112
3113 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3114 _("Show record btrace bts options"),
3115 &show_record_btrace_bts_cmdlist,
3116 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3117
3118 add_setshow_uinteger_cmd ("buffer-size", no_class,
3119 &record_btrace_conf.bts.size,
3120 _("Set the record/replay bts buffer size."),
3121 _("Show the record/replay bts buffer size."), _("\
3122When starting recording, request a trace buffer of this size. \
3123The actual buffer size may differ from the requested size. \
3124Use \"info record\" to see the actual buffer size.\n\n\
3125Bigger buffers allow longer recording but also take more time to process \
3126the recorded execution trace.\n\n\
b20a6524
MM
3127The trace buffer size may not be changed while recording."), NULL,
3128 show_record_bts_buffer_size_value,
d33501a5
MM
3129 &set_record_btrace_bts_cmdlist,
3130 &show_record_btrace_bts_cmdlist);
3131
b20a6524
MM
3132 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3133 _("Set record btrace pt options"),
3134 &set_record_btrace_pt_cmdlist,
3135 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3136
3137 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3138 _("Show record btrace pt options"),
3139 &show_record_btrace_pt_cmdlist,
3140 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3141
3142 add_setshow_uinteger_cmd ("buffer-size", no_class,
3143 &record_btrace_conf.pt.size,
3144 _("Set the record/replay pt buffer size."),
3145 _("Show the record/replay pt buffer size."), _("\
3146Bigger buffers allow longer recording but also take more time to process \
3147the recorded execution.\n\
3148The actual buffer size may differ from the requested size. Use \"info record\" \
3149to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3150 &set_record_btrace_pt_cmdlist,
3151 &show_record_btrace_pt_cmdlist);
3152
afedecd3
MM
3153 init_record_btrace_ops ();
3154 add_target (&record_btrace_ops);
0b722aec
MM
3155
3156 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3157 xcalloc, xfree);
d33501a5
MM
3158
3159 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3160 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3161}