]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
PowerPC VLE insn set additions
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
47/* A new thread observer enabling branch tracing for the new thread. */
48static struct observer *record_btrace_thread_observer;
49
67b5c0c1
MM
50/* Memory access types used in set/show record btrace replay-memory-access. */
51static const char replay_memory_access_read_only[] = "read-only";
52static const char replay_memory_access_read_write[] = "read-write";
53static const char *const replay_memory_access_types[] =
54{
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58};
59
60/* The currently allowed replay memory access type. */
61static const char *replay_memory_access = replay_memory_access_read_only;
62
63/* Command lists for "set/show record btrace". */
64static struct cmd_list_element *set_record_btrace_cmdlist;
65static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 66
70ad5bff
MM
67/* The execution direction of the last resume we got. See record-full.c. */
68static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70/* The async event handler for reverse/replay execution. */
71static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
aef92902
MM
73/* A flag indicating that we are currently generating a core file. */
74static int record_btrace_generating_corefile;
75
f4abbc16
MM
76/* The current branch trace configuration. */
77static struct btrace_config record_btrace_conf;
78
79/* Command list for "record btrace". */
80static struct cmd_list_element *record_btrace_cmdlist;
81
d33501a5
MM
82/* Command lists for "set/show record btrace bts". */
83static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
b20a6524
MM
86/* Command lists for "set/show record btrace pt". */
87static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
afedecd3
MM
90/* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93#define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
afedecd3
MM
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
066ce621
MM
109static struct thread_info *
110require_btrace_thread (void)
afedecd3
MM
111{
112 struct thread_info *tp;
afedecd3
MM
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
cd4007e4
MM
120 validate_registers_access ();
121
afedecd3
MM
122 btrace_fetch (tp);
123
6e07b1d2 124 if (btrace_is_empty (tp))
afedecd3
MM
125 error (_("No trace."));
126
066ce621
MM
127 return tp;
128}
129
130/* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136static struct btrace_thread_info *
137require_btrace (void)
138{
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
afedecd3
MM
144}
145
146/* Enable branch tracing for one thread. Warn on errors. */
147
148static void
149record_btrace_enable_warn (struct thread_info *tp)
150{
492d29ea
PA
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
afedecd3
MM
160}
161
162/* Callback function to disable branch tracing for one thread. */
163
164static void
165record_btrace_disable_callback (void *arg)
166{
19ba03f4 167 struct thread_info *tp = (struct thread_info *) arg;
afedecd3
MM
168
169 btrace_disable (tp);
170}
171
172/* Enable automatic tracing of new threads. */
173
174static void
175record_btrace_auto_enable (void)
176{
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181}
182
183/* Disable automatic tracing of new threads. */
184
185static void
186record_btrace_auto_disable (void)
187{
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196}
197
70ad5bff
MM
198/* The record-btrace async event handler function. */
199
200static void
201record_btrace_handle_async_inferior_event (gdb_client_data data)
202{
203 inferior_event_handler (INF_REG_EVENT, NULL);
204}
205
c0272db5
TW
206/* See record-btrace.h. */
207
208void
209record_btrace_push_target (void)
210{
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224}
225
afedecd3
MM
226/* The to_open method of target record-btrace. */
227
228static void
014f9477 229record_btrace_open (const char *args, int from_tty)
afedecd3
MM
230{
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
8213266a 236 record_preopen ();
afedecd3
MM
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
afedecd3
MM
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
034f788c 244 ALL_NON_EXITED_THREADS (tp)
5d5658a1 245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 246 {
f4abbc16 247 btrace_enable (tp, &record_btrace_conf);
afedecd3
MM
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
c0272db5 252 record_btrace_push_target ();
afedecd3
MM
253
254 discard_cleanups (disable_chain);
255}
256
257/* The to_stop_recording method of target record-btrace. */
258
259static void
c6cd7c02 260record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
261{
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
034f788c 268 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271}
272
c0272db5
TW
273/* The to_disconnect method of target record-btrace. */
274
275static void
276record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278{
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286}
287
afedecd3
MM
288/* The to_close method of target record-btrace. */
289
290static void
de90e03d 291record_btrace_close (struct target_ops *self)
afedecd3 292{
568e808b
MM
293 struct thread_info *tp;
294
70ad5bff
MM
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
99c819ee
MM
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
568e808b
MM
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
034f788c 304 ALL_NON_EXITED_THREADS (tp)
568e808b 305 btrace_teardown (tp);
afedecd3
MM
306}
307
b7d2e916
PA
308/* The to_async method of target record-btrace. */
309
310static void
6a3753b3 311record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 312{
6a3753b3 313 if (enable)
b7d2e916
PA
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
6a3753b3 318 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
319}
320
d33501a5
MM
/* Scale *SIZE down to the largest unit (GiB, MiB, KiB) that divides it
   evenly and return the matching suffix; return "" and leave *SIZE
   untouched if no unit divides it.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
  {
    { 30, "GB" },
    { 20, "MB" },
    { 10, "kB" }
  };
  const unsigned int value = *size;
  size_t i;

  for (i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    if ((value & ((1u << units[i].shift) - 1)) == 0)
      {
	*size = value >> units[i].shift;
	return units[i].suffix;
      }

  return "";
}
348
349/* Print a BTS configuration. */
350
351static void
352record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353{
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363}
364
bc504a31 365/* Print an Intel Processor Trace configuration. */
b20a6524
MM
366
367static void
368record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369{
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379}
380
d33501a5
MM
381/* Print a branch tracing configuration. */
382
383static void
384record_btrace_print_conf (const struct btrace_config *conf)
385{
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
b20a6524
MM
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
d33501a5
MM
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
404}
405
afedecd3
MM
406/* The to_info_record method of target record-btrace. */
407
408static void
630d6a4a 409record_btrace_info (struct target_ops *self)
afedecd3
MM
410{
411 struct btrace_thread_info *btinfo;
f4abbc16 412 const struct btrace_config *conf;
afedecd3 413 struct thread_info *tp;
31fd9caa 414 unsigned int insns, calls, gaps;
afedecd3
MM
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
cd4007e4
MM
422 validate_registers_access ();
423
f4abbc16
MM
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
d33501a5 428 record_btrace_print_conf (conf);
f4abbc16 429
afedecd3
MM
430 btrace_fetch (tp);
431
23a7fe75
MM
432 insns = 0;
433 calls = 0;
31fd9caa 434 gaps = 0;
23a7fe75 435
6e07b1d2 436 if (!btrace_is_empty (tp))
23a7fe75
MM
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
5de9129b 443 calls = btrace_call_number (&call);
23a7fe75
MM
444
445 btrace_insn_end (&insn, btinfo);
5de9129b 446 insns = btrace_insn_number (&insn);
31fd9caa 447
69090cee
TW
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
451 insns -= 1;
31fd9caa
MM
452
453 gaps = btinfo->ngaps;
23a7fe75 454 }
afedecd3 455
31fd9caa 456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
43792cf0
PA
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
07bbe694
MM
459
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
afedecd3
MM
463}
464
31fd9caa
MM
465/* Print a decode error. */
466
467static void
468btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470{
508352a9 471 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 472
112e8700 473 uiout->text (_("["));
508352a9
TW
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 476 {
112e8700
SM
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
31fd9caa 480 }
112e8700
SM
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
31fd9caa
MM
483}
484
afedecd3
MM
485/* Print an unsigned int. */
486
487static void
488ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
489{
112e8700 490 uiout->field_fmt (fld, "%u", val);
afedecd3
MM
491}
492
f94cc897
MM
/* A contiguous range of source lines within one symtab.  */

struct btrace_line_range
{
  /* The symtab these lines belong to.  */
  struct symtab *symtab;

  /* First line of the range (inclusive).  */
  int begin;

  /* One past the last line of the range (exclusive).  */
  int end;
};
506
507/* Construct a line range. */
508
509static struct btrace_line_range
510btrace_mk_line_range (struct symtab *symtab, int begin, int end)
511{
512 struct btrace_line_range range;
513
514 range.symtab = symtab;
515 range.begin = begin;
516 range.end = end;
517
518 return range;
519}
520
521/* Add a line to a line range. */
522
523static struct btrace_line_range
524btrace_line_range_add (struct btrace_line_range range, int line)
525{
526 if (range.end <= range.begin)
527 {
528 /* This is the first entry. */
529 range.begin = line;
530 range.end = line + 1;
531 }
532 else if (line < range.begin)
533 range.begin = line;
534 else if (range.end < line)
535 range.end = line;
536
537 return range;
538}
539
540/* Return non-zero if RANGE is empty, zero otherwise. */
541
542static int
543btrace_line_range_is_empty (struct btrace_line_range range)
544{
545 return range.end <= range.begin;
546}
547
548/* Return non-zero if LHS contains RHS, zero otherwise. */
549
550static int
551btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
553{
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
557}
558
559/* Find the line range associated with PC. */
560
561static struct btrace_line_range
562btrace_find_line_range (CORE_ADDR pc)
563{
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
568 int nlines, i;
569
570 symtab = find_pc_line_symtab (pc);
571 if (symtab == NULL)
572 return btrace_mk_line_range (NULL, 0, 0);
573
574 ltable = SYMTAB_LINETABLE (symtab);
575 if (ltable == NULL)
576 return btrace_mk_line_range (symtab, 0, 0);
577
578 nlines = ltable->nitems;
579 lines = ltable->item;
580 if (nlines <= 0)
581 return btrace_mk_line_range (symtab, 0, 0);
582
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
585 {
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
588 }
589
590 return range;
591}
592
593/* Print source lines in LINES to UIOUT.
594
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
601
602static void
603btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
604 struct cleanup **ui_item_chain, int flags)
605{
8d297bbf 606 print_source_lines_flags psl_flags;
f94cc897
MM
607 int line;
608
609 psl_flags = 0;
610 if (flags & DISASSEMBLY_FILENAME)
611 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
612
613 for (line = lines.begin; line < lines.end; ++line)
614 {
615 if (*ui_item_chain != NULL)
616 do_cleanups (*ui_item_chain);
617
618 *ui_item_chain
619 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
620
621 print_source_lines (lines.symtab, line, line + 1, psl_flags);
622
623 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
624 }
625}
626
afedecd3
MM
627/* Disassemble a section of the recorded instruction trace. */
628
629static void
23a7fe75 630btrace_insn_history (struct ui_out *uiout,
31fd9caa 631 const struct btrace_thread_info *btinfo,
23a7fe75
MM
632 const struct btrace_insn_iterator *begin,
633 const struct btrace_insn_iterator *end, int flags)
afedecd3 634{
f94cc897 635 struct cleanup *cleanups, *ui_item_chain;
afedecd3 636 struct gdbarch *gdbarch;
23a7fe75 637 struct btrace_insn_iterator it;
f94cc897 638 struct btrace_line_range last_lines;
afedecd3 639
23a7fe75
MM
640 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
641 btrace_insn_number (end));
afedecd3 642
f94cc897
MM
643 flags |= DISASSEMBLY_SPECULATIVE;
644
afedecd3 645 gdbarch = target_gdbarch ();
f94cc897
MM
646 last_lines = btrace_mk_line_range (NULL, 0, 0);
647
187808b0 648 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
f94cc897
MM
649
650 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
651 instructions corresponding to that line. */
652 ui_item_chain = NULL;
afedecd3 653
8b172ce7
PA
654 gdb_pretty_print_disassembler disasm (gdbarch);
655
23a7fe75 656 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 657 {
23a7fe75
MM
658 const struct btrace_insn *insn;
659
660 insn = btrace_insn_get (&it);
661
31fd9caa
MM
662 /* A NULL instruction indicates a gap in the trace. */
663 if (insn == NULL)
664 {
665 const struct btrace_config *conf;
666
667 conf = btrace_conf (btinfo);
afedecd3 668
31fd9caa
MM
669 /* We have trace so we must have a configuration. */
670 gdb_assert (conf != NULL);
671
69090cee
TW
672 uiout->field_fmt ("insn-number", "%u",
673 btrace_insn_number (&it));
674 uiout->text ("\t");
675
676 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
31fd9caa
MM
677 conf->format);
678 }
679 else
680 {
f94cc897 681 struct disasm_insn dinsn;
da8c46d2 682
f94cc897 683 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 684 {
f94cc897
MM
685 struct btrace_line_range lines;
686
687 lines = btrace_find_line_range (insn->pc);
688 if (!btrace_line_range_is_empty (lines)
689 && !btrace_line_range_contains_range (last_lines, lines))
690 {
691 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
692 last_lines = lines;
693 }
694 else if (ui_item_chain == NULL)
695 {
696 ui_item_chain
697 = make_cleanup_ui_out_tuple_begin_end (uiout,
698 "src_and_asm_line");
699 /* No source information. */
700 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
701 }
702
703 gdb_assert (ui_item_chain != NULL);
da8c46d2 704 }
da8c46d2 705
f94cc897
MM
706 memset (&dinsn, 0, sizeof (dinsn));
707 dinsn.number = btrace_insn_number (&it);
708 dinsn.addr = insn->pc;
31fd9caa 709
da8c46d2 710 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 711 dinsn.is_speculative = 1;
da8c46d2 712
8b172ce7 713 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 714 }
afedecd3 715 }
f94cc897
MM
716
717 do_cleanups (cleanups);
afedecd3
MM
718}
719
720/* The to_insn_history method of target record-btrace. */
721
722static void
7a6c5609 723record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
724{
725 struct btrace_thread_info *btinfo;
23a7fe75
MM
726 struct btrace_insn_history *history;
727 struct btrace_insn_iterator begin, end;
afedecd3
MM
728 struct cleanup *uiout_cleanup;
729 struct ui_out *uiout;
23a7fe75 730 unsigned int context, covered;
afedecd3
MM
731
732 uiout = current_uiout;
733 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
734 "insn history");
afedecd3 735 context = abs (size);
afedecd3
MM
736 if (context == 0)
737 error (_("Bad record instruction-history-size."));
738
23a7fe75
MM
739 btinfo = require_btrace ();
740 history = btinfo->insn_history;
741 if (history == NULL)
afedecd3 742 {
07bbe694 743 struct btrace_insn_iterator *replay;
afedecd3 744
23a7fe75 745 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 746
07bbe694
MM
747 /* If we're replaying, we start at the replay position. Otherwise, we
748 start at the tail of the trace. */
749 replay = btinfo->replay;
750 if (replay != NULL)
751 begin = *replay;
752 else
753 btrace_insn_end (&begin, btinfo);
754
755 /* We start from here and expand in the requested direction. Then we
756 expand in the other direction, as well, to fill up any remaining
757 context. */
758 end = begin;
759 if (size < 0)
760 {
761 /* We want the current position covered, as well. */
762 covered = btrace_insn_next (&end, 1);
763 covered += btrace_insn_prev (&begin, context - covered);
764 covered += btrace_insn_next (&end, context - covered);
765 }
766 else
767 {
768 covered = btrace_insn_next (&end, context);
769 covered += btrace_insn_prev (&begin, context - covered);
770 }
afedecd3
MM
771 }
772 else
773 {
23a7fe75
MM
774 begin = history->begin;
775 end = history->end;
afedecd3 776
23a7fe75
MM
777 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
778 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 779
23a7fe75
MM
780 if (size < 0)
781 {
782 end = begin;
783 covered = btrace_insn_prev (&begin, context);
784 }
785 else
786 {
787 begin = end;
788 covered = btrace_insn_next (&end, context);
789 }
afedecd3
MM
790 }
791
23a7fe75 792 if (covered > 0)
31fd9caa 793 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
794 else
795 {
796 if (size < 0)
797 printf_unfiltered (_("At the start of the branch trace record.\n"));
798 else
799 printf_unfiltered (_("At the end of the branch trace record.\n"));
800 }
afedecd3 801
23a7fe75 802 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
803 do_cleanups (uiout_cleanup);
804}
805
806/* The to_insn_history_range method of target record-btrace. */
807
808static void
4e99c6b7
TT
809record_btrace_insn_history_range (struct target_ops *self,
810 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
811{
812 struct btrace_thread_info *btinfo;
23a7fe75
MM
813 struct btrace_insn_history *history;
814 struct btrace_insn_iterator begin, end;
afedecd3
MM
815 struct cleanup *uiout_cleanup;
816 struct ui_out *uiout;
23a7fe75
MM
817 unsigned int low, high;
818 int found;
afedecd3
MM
819
820 uiout = current_uiout;
821 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
822 "insn history");
23a7fe75
MM
823 low = from;
824 high = to;
afedecd3 825
23a7fe75 826 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
827
828 /* Check for wrap-arounds. */
23a7fe75 829 if (low != from || high != to)
afedecd3
MM
830 error (_("Bad range."));
831
0688d04e 832 if (high < low)
afedecd3
MM
833 error (_("Bad range."));
834
23a7fe75 835 btinfo = require_btrace ();
afedecd3 836
23a7fe75
MM
837 found = btrace_find_insn_by_number (&begin, btinfo, low);
838 if (found == 0)
839 error (_("Range out of bounds."));
afedecd3 840
23a7fe75
MM
841 found = btrace_find_insn_by_number (&end, btinfo, high);
842 if (found == 0)
0688d04e
MM
843 {
844 /* Silently truncate the range. */
845 btrace_insn_end (&end, btinfo);
846 }
847 else
848 {
849 /* We want both begin and end to be inclusive. */
850 btrace_insn_next (&end, 1);
851 }
afedecd3 852
31fd9caa 853 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 854 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
855
856 do_cleanups (uiout_cleanup);
857}
858
859/* The to_insn_history_from method of target record-btrace. */
860
861static void
9abc3ff3
TT
862record_btrace_insn_history_from (struct target_ops *self,
863 ULONGEST from, int size, int flags)
afedecd3
MM
864{
865 ULONGEST begin, end, context;
866
867 context = abs (size);
0688d04e
MM
868 if (context == 0)
869 error (_("Bad record instruction-history-size."));
afedecd3
MM
870
871 if (size < 0)
872 {
873 end = from;
874
875 if (from < context)
876 begin = 0;
877 else
0688d04e 878 begin = from - context + 1;
afedecd3
MM
879 }
880 else
881 {
882 begin = from;
0688d04e 883 end = from + context - 1;
afedecd3
MM
884
885 /* Check for wrap-around. */
886 if (end < begin)
887 end = ULONGEST_MAX;
888 }
889
4e99c6b7 890 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
891}
892
893/* Print the instruction number range for a function call history line. */
894
895static void
23a7fe75
MM
896btrace_call_history_insn_range (struct ui_out *uiout,
897 const struct btrace_function *bfun)
afedecd3 898{
7acbe133
MM
899 unsigned int begin, end, size;
900
901 size = VEC_length (btrace_insn_s, bfun->insn);
902 gdb_assert (size > 0);
afedecd3 903
23a7fe75 904 begin = bfun->insn_offset;
7acbe133 905 end = begin + size - 1;
afedecd3 906
23a7fe75 907 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 908 uiout->text (",");
23a7fe75 909 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
910}
911
ce0dfbea
MM
912/* Compute the lowest and highest source line for the instructions in BFUN
913 and return them in PBEGIN and PEND.
914 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
915 result from inlining or macro expansion. */
916
917static void
918btrace_compute_src_line_range (const struct btrace_function *bfun,
919 int *pbegin, int *pend)
920{
921 struct btrace_insn *insn;
922 struct symtab *symtab;
923 struct symbol *sym;
924 unsigned int idx;
925 int begin, end;
926
927 begin = INT_MAX;
928 end = INT_MIN;
929
930 sym = bfun->sym;
931 if (sym == NULL)
932 goto out;
933
934 symtab = symbol_symtab (sym);
935
936 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
937 {
938 struct symtab_and_line sal;
939
940 sal = find_pc_line (insn->pc, 0);
941 if (sal.symtab != symtab || sal.line == 0)
942 continue;
943
325fac50
PA
944 begin = std::min (begin, sal.line);
945 end = std::max (end, sal.line);
ce0dfbea
MM
946 }
947
948 out:
949 *pbegin = begin;
950 *pend = end;
951}
952
afedecd3
MM
953/* Print the source line information for a function call history line. */
954
955static void
23a7fe75
MM
956btrace_call_history_src_line (struct ui_out *uiout,
957 const struct btrace_function *bfun)
afedecd3
MM
958{
959 struct symbol *sym;
23a7fe75 960 int begin, end;
afedecd3
MM
961
962 sym = bfun->sym;
963 if (sym == NULL)
964 return;
965
112e8700 966 uiout->field_string ("file",
08be3fe3 967 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 968
ce0dfbea 969 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 970 if (end < begin)
afedecd3
MM
971 return;
972
112e8700
SM
973 uiout->text (":");
974 uiout->field_int ("min line", begin);
afedecd3 975
23a7fe75 976 if (end == begin)
afedecd3
MM
977 return;
978
112e8700
SM
979 uiout->text (",");
980 uiout->field_int ("max line", end);
afedecd3
MM
981}
982
0b722aec
MM
983/* Get the name of a branch trace function. */
984
985static const char *
986btrace_get_bfun_name (const struct btrace_function *bfun)
987{
988 struct minimal_symbol *msym;
989 struct symbol *sym;
990
991 if (bfun == NULL)
992 return "??";
993
994 msym = bfun->msym;
995 sym = bfun->sym;
996
997 if (sym != NULL)
998 return SYMBOL_PRINT_NAME (sym);
999 else if (msym != NULL)
efd66ac6 1000 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1001 else
1002 return "??";
1003}
1004
afedecd3
MM
1005/* Disassemble a section of the recorded function trace. */
1006
1007static void
23a7fe75 1008btrace_call_history (struct ui_out *uiout,
8710b709 1009 const struct btrace_thread_info *btinfo,
23a7fe75
MM
1010 const struct btrace_call_iterator *begin,
1011 const struct btrace_call_iterator *end,
8d297bbf 1012 int int_flags)
afedecd3 1013{
23a7fe75 1014 struct btrace_call_iterator it;
8d297bbf 1015 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1016
8d297bbf 1017 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1018 btrace_call_number (end));
afedecd3 1019
23a7fe75 1020 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1021 {
23a7fe75
MM
1022 const struct btrace_function *bfun;
1023 struct minimal_symbol *msym;
1024 struct symbol *sym;
1025
1026 bfun = btrace_call_get (&it);
23a7fe75 1027 sym = bfun->sym;
0b722aec 1028 msym = bfun->msym;
23a7fe75 1029
afedecd3 1030 /* Print the function index. */
23a7fe75 1031 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1032 uiout->text ("\t");
afedecd3 1033
31fd9caa
MM
1034 /* Indicate gaps in the trace. */
1035 if (bfun->errcode != 0)
1036 {
1037 const struct btrace_config *conf;
1038
1039 conf = btrace_conf (btinfo);
1040
1041 /* We have trace so we must have a configuration. */
1042 gdb_assert (conf != NULL);
1043
1044 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1045
1046 continue;
1047 }
1048
8710b709
MM
1049 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1050 {
1051 int level = bfun->level + btinfo->level, i;
1052
1053 for (i = 0; i < level; ++i)
112e8700 1054 uiout->text (" ");
8710b709
MM
1055 }
1056
1057 if (sym != NULL)
112e8700 1058 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1059 else if (msym != NULL)
112e8700
SM
1060 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1061 else if (!uiout->is_mi_like_p ())
1062 uiout->field_string ("function", "??");
8710b709 1063
1e038f67 1064 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1065 {
112e8700 1066 uiout->text (_("\tinst "));
23a7fe75 1067 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
1068 }
1069
1e038f67 1070 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1071 {
112e8700 1072 uiout->text (_("\tat "));
23a7fe75 1073 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
1074 }
1075
112e8700 1076 uiout->text ("\n");
afedecd3
MM
1077 }
1078}
1079
/* The to_call_history method of target record-btrace.

   Print SIZE function-call-history entries, continuing from (or expanding
   around) the previous call-history window stored in the thread's
   btrace_thread_info.  A negative SIZE moves backwards.  INT_FLAGS is an
   int-encoded record_print_flags.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this is the
     call history; record_btrace_call_history_range uses "func history".
     Looks like a copy/paste slip, but renaming would change MI output --
     confirm before touching.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* We printed a history window before; continue from its edge.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left in the requested direction; tell the user which end
	 of the trace was hit.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed window for the next repeat command.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1169
1170/* The to_call_history_range method of target record-btrace. */
1171
1172static void
f0d960ea 1173record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1174 ULONGEST from, ULONGEST to,
1175 int int_flags)
afedecd3
MM
1176{
1177 struct btrace_thread_info *btinfo;
23a7fe75
MM
1178 struct btrace_call_history *history;
1179 struct btrace_call_iterator begin, end;
afedecd3
MM
1180 struct cleanup *uiout_cleanup;
1181 struct ui_out *uiout;
23a7fe75
MM
1182 unsigned int low, high;
1183 int found;
8d297bbf 1184 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1185
1186 uiout = current_uiout;
1187 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1188 "func history");
23a7fe75
MM
1189 low = from;
1190 high = to;
afedecd3 1191
8d297bbf 1192 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
afedecd3
MM
1193
1194 /* Check for wrap-arounds. */
23a7fe75 1195 if (low != from || high != to)
afedecd3
MM
1196 error (_("Bad range."));
1197
0688d04e 1198 if (high < low)
afedecd3
MM
1199 error (_("Bad range."));
1200
23a7fe75 1201 btinfo = require_btrace ();
afedecd3 1202
23a7fe75
MM
1203 found = btrace_find_call_by_number (&begin, btinfo, low);
1204 if (found == 0)
1205 error (_("Range out of bounds."));
afedecd3 1206
23a7fe75
MM
1207 found = btrace_find_call_by_number (&end, btinfo, high);
1208 if (found == 0)
0688d04e
MM
1209 {
1210 /* Silently truncate the range. */
1211 btrace_call_end (&end, btinfo);
1212 }
1213 else
1214 {
1215 /* We want both begin and end to be inclusive. */
1216 btrace_call_next (&end, 1);
1217 }
afedecd3 1218
8710b709 1219 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1220 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1221
1222 do_cleanups (uiout_cleanup);
1223}
1224
/* The to_call_history_from method of target record-btrace.

   Print abs (SIZE) function-call-history entries ending at (SIZE < 0) or
   starting at (SIZE > 0) entry FROM, inclusive.  Translates the request
   into an inclusive [begin, end] range and delegates to
   record_btrace_call_history_range.  INT_FLAGS is an int-encoded
   record_print_flags.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the beginning of the trace instead of wrapping below 0.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
1260
b158a20f
TW
1261/* The to_record_method method of target record-btrace. */
1262
1263static enum record_method
1264record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1265{
1266 const struct btrace_config *config;
1267 struct thread_info * const tp = find_thread_ptid (ptid);
1268
1269 if (tp == NULL)
1270 error (_("No thread."));
1271
1272 if (tp->btrace.target == NULL)
1273 return RECORD_METHOD_NONE;
1274
1275 return RECORD_METHOD_BTRACE;
1276}
1277
07bbe694
MM
1278/* The to_record_is_replaying method of target record-btrace. */
1279
1280static int
a52eab48 1281record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1282{
1283 struct thread_info *tp;
1284
034f788c 1285 ALL_NON_EXITED_THREADS (tp)
a52eab48 1286 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1287 return 1;
1288
1289 return 0;
1290}
1291
7ff27e9b
MM
1292/* The to_record_will_replay method of target record-btrace. */
1293
1294static int
1295record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1296{
1297 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1298}
1299
633785ff
MM
1300/* The to_xfer_partial method of target record-btrace. */
1301
9b409511 1302static enum target_xfer_status
633785ff
MM
1303record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1304 const char *annex, gdb_byte *readbuf,
1305 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1306 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1307{
1308 struct target_ops *t;
1309
1310 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1311 if (replay_memory_access == replay_memory_access_read_only
aef92902 1312 && !record_btrace_generating_corefile
4d10e986 1313 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1314 {
1315 switch (object)
1316 {
1317 case TARGET_OBJECT_MEMORY:
1318 {
1319 struct target_section *section;
1320
1321 /* We do not allow writing memory in general. */
1322 if (writebuf != NULL)
9b409511
YQ
1323 {
1324 *xfered_len = len;
bc113b4e 1325 return TARGET_XFER_UNAVAILABLE;
9b409511 1326 }
633785ff
MM
1327
1328 /* We allow reading readonly memory. */
1329 section = target_section_by_addr (ops, offset);
1330 if (section != NULL)
1331 {
1332 /* Check if the section we found is readonly. */
1333 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1334 section->the_bfd_section)
1335 & SEC_READONLY) != 0)
1336 {
1337 /* Truncate the request to fit into this section. */
325fac50 1338 len = std::min (len, section->endaddr - offset);
633785ff
MM
1339 break;
1340 }
1341 }
1342
9b409511 1343 *xfered_len = len;
bc113b4e 1344 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1345 }
1346 }
1347 }
1348
1349 /* Forward the request. */
e75fdfca
TT
1350 ops = ops->beneath;
1351 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1352 offset, len, xfered_len);
633785ff
MM
1353}
1354
/* The to_insert_breakpoint method of target record-btrace.

   Breakpoint insertion writes to target memory, which is normally refused
   while replaying.  Temporarily lift the restriction, forward the request,
   and restore the previous access mode on both the normal and the
   exceptional path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1385
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: temporarily allow
   memory writes while the breakpoint beneath is removed, restoring the
   previous replay access mode even if the removal throws.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1418
/* The to_fetch_registers method of target record-btrace.

   While replaying, the only register we can reconstruct from the branch
   trace is the PC, which we supply from the current replay instruction.
   When not replaying (or while generating a core file), forward the
   request to the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1459
1460/* The to_store_registers method of target record-btrace. */
1461
1462static void
1463record_btrace_store_registers (struct target_ops *ops,
1464 struct regcache *regcache, int regno)
1465{
1466 struct target_ops *t;
1467
a52eab48 1468 if (!record_btrace_generating_corefile
bcc0c096 1469 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
4d10e986 1470 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1471
1472 gdb_assert (may_write_registers != 0);
1473
e75fdfca
TT
1474 t = ops->beneath;
1475 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1476}
1477
1478/* The to_prepare_to_store method of target record-btrace. */
1479
1480static void
1481record_btrace_prepare_to_store (struct target_ops *ops,
1482 struct regcache *regcache)
1483{
1484 struct target_ops *t;
1485
a52eab48 1486 if (!record_btrace_generating_corefile
bcc0c096 1487 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1f3ef581
MM
1488 return;
1489
e75fdfca
TT
1490 t = ops->beneath;
1491 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1492}
1493
/* The branch trace frame cache.

   One instance exists per btrace frame; it ties a frame_info to the
   btrace function segment it represents.  */

struct btrace_frame_cache
{
  /* The thread the frame belongs to.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table.  Entries are hashed and compared
   by their FRAME pointer (see bfcache_hash/bfcache_eq below).  */

static htab_t bfcache;
1511
1512/* hash_f for htab_create_alloc of bfcache. */
1513
1514static hashval_t
1515bfcache_hash (const void *arg)
1516{
19ba03f4
SM
1517 const struct btrace_frame_cache *cache
1518 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1519
1520 return htab_hash_pointer (cache->frame);
1521}
1522
1523/* eq_f for htab_create_alloc of bfcache. */
1524
1525static int
1526bfcache_eq (const void *arg1, const void *arg2)
1527{
19ba03f4
SM
1528 const struct btrace_frame_cache *cache1
1529 = (const struct btrace_frame_cache *) arg1;
1530 const struct btrace_frame_cache *cache2
1531 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1532
1533 return cache1->frame == cache2->frame;
1534}
1535
1536/* Create a new btrace frame cache. */
1537
1538static struct btrace_frame_cache *
1539bfcache_new (struct frame_info *frame)
1540{
1541 struct btrace_frame_cache *cache;
1542 void **slot;
1543
1544 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1545 cache->frame = frame;
1546
1547 slot = htab_find_slot (bfcache, cache, INSERT);
1548 gdb_assert (*slot == NULL);
1549 *slot = cache;
1550
1551 return cache;
1552}
1553
1554/* Extract the branch trace function from a branch trace frame. */
1555
1556static const struct btrace_function *
1557btrace_get_frame_function (struct frame_info *frame)
1558{
1559 const struct btrace_frame_cache *cache;
1560 const struct btrace_function *bfun;
1561 struct btrace_frame_cache pattern;
1562 void **slot;
1563
1564 pattern.frame = frame;
1565
1566 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1567 if (slot == NULL)
1568 return NULL;
1569
19ba03f4 1570 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1571 return cache->bfun;
1572}
1573
cecac1ab
MM
1574/* Implement stop_reason method for record_btrace_frame_unwind. */
1575
1576static enum unwind_stop_reason
1577record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1578 void **this_cache)
1579{
0b722aec
MM
1580 const struct btrace_frame_cache *cache;
1581 const struct btrace_function *bfun;
1582
19ba03f4 1583 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1584 bfun = cache->bfun;
1585 gdb_assert (bfun != NULL);
1586
1587 if (bfun->up == NULL)
1588 return UNWIND_UNAVAILABLE;
1589
1590 return UNWIND_NO_REASON;
cecac1ab
MM
1591}
1592
/* Implement this_id method for record_btrace_frame_unwind.

   The stack is not available in the record history, so build an
   "unavailable stack" frame id from the frame's function entry address
   and the number of the first segment of this function instance.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance so that all
     segments of the same instance produce the same frame id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1621
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from the branch trace.  For a caller reached
   via a return, the PC is the caller's first recorded instruction; for a
   caller linked via a call, it is the instruction after the caller's last
   recorded instruction.  Any other register request throws
   NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The caller segment starts at the return address.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Resume after the call instruction that ends the caller segment.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1670
/* Implement sniffer method for record_btrace_frame_unwind.

   Claim THIS_FRAME when replaying: the innermost frame is the current
   replay position's function segment; an outer frame is the caller of the
   (non-tailcall) btrace frame just inside it.  On success, set up the
   btrace frame cache and return 1.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: the caller of the btrace frame inside us, unless the
	 callee was reached by a tail call (handled by the tailcall
	 sniffer below).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1720
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claim THIS_FRAME iff the next (inner) frame is a btrace frame whose
   function segment was entered via a tail call; THIS_FRAME then
   represents that segment's caller.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* A tailcall frame is never innermost.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1758
1759static void
1760record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1761{
1762 struct btrace_frame_cache *cache;
1763 void **slot;
1764
19ba03f4 1765 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1766
1767 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1768 gdb_assert (slot != NULL);
1769
1770 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1771}
1772
/* btrace recording does not store previous memory content, nor the stack
   frames' content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,	/* unwind_data -- presumably unused here; confirm against frame.h.  */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1789
/* Unwinder for btrace frames reached via a tail call; identical to
   record_btrace_frame_unwind except for the frame type and the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,	/* unwind_data -- presumably unused here; confirm against frame.h.  */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1800
/* Implement the to_get_unwinder method.  Return the btrace unwinder for
   normal frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1808
/* Implement the to_get_tailcall_unwinder method.  Return the btrace
   unwinder for tail-call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1816
987e68b1
MM
1817/* Return a human-readable string for FLAG. */
1818
1819static const char *
1820btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1821{
1822 switch (flag)
1823 {
1824 case BTHR_STEP:
1825 return "step";
1826
1827 case BTHR_RSTEP:
1828 return "reverse-step";
1829
1830 case BTHR_CONT:
1831 return "cont";
1832
1833 case BTHR_RCONT:
1834 return "reverse-cont";
1835
1836 case BTHR_STOP:
1837 return "stop";
1838 }
1839
1840 return "<invalid>";
1841}
1842
/* Indicate that TP should be resumed according to FLAG.

   This only records the intent in TP's btrace flags; the actual stepping
   is done later, in the wait method.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1863
/* Get the current frame for TP.

   Temporarily switches INFERIOR_PTID to TP and clears TP's executing flag
   so get_current_frame can recompute the stack; both are restored on all
   paths, including when frame computation throws.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1912
/* Start replaying a thread.

   Allocate and install a replay iterator positioned at the end of TP's
   branch trace (skipping trailing gaps), and fix up infrun's stored
   stepping frame ids, which are computed differently while replaying.
   Returns the installed iterator, or NULL if there is no trace.  On
   error, the thread is left not replaying.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Undo the partial setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
1994
1995/* Stop replaying a thread. */
1996
1997static void
1998record_btrace_stop_replaying (struct thread_info *tp)
1999{
2000 struct btrace_thread_info *btinfo;
2001
2002 btinfo = &tp->btrace;
2003
2004 xfree (btinfo->replay);
2005 btinfo->replay = NULL;
2006
2007 /* Make sure we're not leaving any stale registers. */
2008 registers_changed_ptid (tp->ptid);
2009}
2010
e3cfc1c7
MM
2011/* Stop replaying TP if it is at the end of its execution history. */
2012
2013static void
2014record_btrace_stop_replaying_at_end (struct thread_info *tp)
2015{
2016 struct btrace_insn_iterator *replay, end;
2017 struct btrace_thread_info *btinfo;
2018
2019 btinfo = &tp->btrace;
2020 replay = btinfo->replay;
2021
2022 if (replay == NULL)
2023 return;
2024
2025 btrace_insn_end (&end, btinfo);
2026
2027 if (btrace_insn_cmp (replay, &end) == 0)
2028 record_btrace_stop_replaying (tp);
2029}
2030
/* The to_resume method of target record-btrace.

   When not replaying and moving forward, the request is simply forwarded
   to the target beneath.  Otherwise the requested move is recorded as
   btrace thread flags; the actual stepping happens in the wait method.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  FLAG applies
     to the resumed thread(s), CFLAG to threads merely continued.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2106
85ad3aaf
PA
2107/* The to_commit_resume method of target record-btrace. */
2108
2109static void
2110record_btrace_commit_resume (struct target_ops *ops)
2111{
2112 if ((execution_direction != EXEC_REVERSE)
2113 && !record_btrace_is_replaying (ops, minus_one_ptid))
2114 ops->beneath->to_commit_resume (ops->beneath);
2115}
2116
/* Cancel resuming TP.

   Clear any pending move/stop request and, if TP has replayed to the end
   of its history, stop replaying it.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
2136
2137/* Return a target_waitstatus indicating that we ran out of history. */
2138
2139static struct target_waitstatus
2140btrace_step_no_history (void)
2141{
2142 struct target_waitstatus status;
2143
2144 status.kind = TARGET_WAITKIND_NO_HISTORY;
2145
2146 return status;
2147}
2148
2149/* Return a target_waitstatus indicating that a step finished. */
2150
2151static struct target_waitstatus
2152btrace_step_stopped (void)
2153{
2154 struct target_waitstatus status;
2155
2156 status.kind = TARGET_WAITKIND_STOPPED;
2157 status.value.sig = GDB_SIGNAL_TRAP;
2158
2159 return status;
2160}
2161
6e4879f0
MM
2162/* Return a target_waitstatus indicating that a thread was stopped as
2163 requested. */
2164
2165static struct target_waitstatus
2166btrace_step_stopped_on_request (void)
2167{
2168 struct target_waitstatus status;
2169
2170 status.kind = TARGET_WAITKIND_STOPPED;
2171 status.value.sig = GDB_SIGNAL_0;
2172
2173 return status;
2174}
2175
d825d248
MM
2176/* Return a target_waitstatus indicating a spurious stop. */
2177
2178static struct target_waitstatus
2179btrace_step_spurious (void)
2180{
2181 struct target_waitstatus status;
2182
2183 status.kind = TARGET_WAITKIND_SPURIOUS;
2184
2185 return status;
2186}
2187
e3cfc1c7
MM
2188/* Return a target_waitstatus indicating that the thread was not resumed. */
2189
2190static struct target_waitstatus
2191btrace_step_no_resumed (void)
2192{
2193 struct target_waitstatus status;
2194
2195 status.kind = TARGET_WAITKIND_NO_RESUMED;
2196
2197 return status;
2198}
2199
2200/* Return a target_waitstatus indicating that we should wait again. */
2201
2202static struct target_waitstatus
2203btrace_step_again (void)
2204{
2205 struct target_waitstatus status;
2206
2207 status.kind = TARGET_WAITKIND_IGNORE;
2208
2209 return status;
2210}
2211
52834460
MM
2212/* Clear the record histories. */
2213
2214static void
2215record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2216{
2217 xfree (btinfo->insn_history);
2218 xfree (btinfo->call_history);
2219
2220 btinfo->insn_history = NULL;
2221 btinfo->call_history = NULL;
2222}
2223
3c615f99
MM
2224/* Check whether TP's current replay position is at a breakpoint. */
2225
2226static int
2227record_btrace_replay_at_breakpoint (struct thread_info *tp)
2228{
2229 struct btrace_insn_iterator *replay;
2230 struct btrace_thread_info *btinfo;
2231 const struct btrace_insn *insn;
2232 struct inferior *inf;
2233
2234 btinfo = &tp->btrace;
2235 replay = btinfo->replay;
2236
2237 if (replay == NULL)
2238 return 0;
2239
2240 insn = btrace_insn_get (replay);
2241 if (insn == NULL)
2242 return 0;
2243
2244 inf = find_inferior_ptid (tp->ptid);
2245 if (inf == NULL)
2246 return 0;
2247
2248 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2249 &btinfo->stop_reason);
2250}
2251
/* Step one instruction in forward direction.
   Returns NO_HISTORY when not replaying or when the end of the trace is
   reached, STOPPED when stepping onto a breakpoint, and SPURIOUS when the
   replay position moved one instruction and the caller should decide what
   to do next. */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying. */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint. */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started. */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history. */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Restore the saved position so the thread stays on a valid
	     instruction rather than past the end of the trace.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace. */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier. */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2300
/* Step one instruction in backward direction.
   Starts replaying if necessary.  Returns NO_HISTORY at the beginning of
   the trace, STOPPED when the new position is at a breakpoint, and
   SPURIOUS when the position moved back one instruction. */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so. */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started. */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  /* Restore the saved position; the iterator must stay on a valid
	     instruction.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction. */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2347
/* Step a single thread.
   Consumes TP's pending move/stop request, performs at most one single
   step, and returns the resulting wait status.  IGNORE means the caller
   should call again; NO_HISTORY is deferred (see below). */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the pending request; it is re-armed below where needed.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history. */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* A single step either completes (report SIGTRAP stop) or yields a
	 non-spurious status such as NO_HISTORY.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Continue is implemented as repeated single steps; re-arm the flag
	 and ask the caller to step us again.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported. */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2416
e3cfc1c7
MM
/* A vector of threads.  Used by record_btrace_wait to maintain the work
   lists of moving threads and of threads that ran out of history. */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2421
a6b5be76
MM
2422/* Announce further events if necessary. */
2423
2424static void
2425record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2426 const VEC (tp_t) *no_history)
2427{
2428 int more_moving, more_no_history;
2429
2430 more_moving = !VEC_empty (tp_t, moving);
2431 more_no_history = !VEC_empty (tp_t, no_history);
2432
2433 if (!more_moving && !more_no_history)
2434 return;
2435
2436 if (more_moving)
2437 DEBUG ("movers pending");
2438
2439 if (more_no_history)
2440 DEBUG ("no-history pending");
2441
2442 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2443}
2444
b2f4cfde
MM
/* The to_wait method of target record-btrace.
   Forwards to the target beneath unless we are replaying or executing in
   reverse; otherwise drives the replay state machine for all matching
   threads until one of them reports an event. */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request. */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads. */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread matched with a pending request: report NO_RESUMED.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop. */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty. */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop. */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop. */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events. */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position. */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers. */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2579
6e4879f0
MM
2580/* The to_stop method of target record-btrace. */
2581
2582static void
2583record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2584{
2585 DEBUG ("stop %s", target_pid_to_str (ptid));
2586
2587 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2588 if ((execution_direction != EXEC_REVERSE)
2589 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2590 {
2591 ops = ops->beneath;
2592 ops->to_stop (ops, ptid);
2593 }
2594 else
2595 {
2596 struct thread_info *tp;
2597
2598 ALL_NON_EXITED_THREADS (tp)
2599 if (ptid_match (tp->ptid, ptid))
2600 {
2601 tp->btrace.flags &= ~BTHR_MOVE;
2602 tp->btrace.flags |= BTHR_STOP;
2603 }
2604 }
2605 }
2606
52834460
MM
/* The to_can_execute_reverse method of target record-btrace. */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* The recorded execution history always supports reverse execution.  */
  return 1;
}
2614
9e8915c6 2615/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2616
9e8915c6
PA
2617static int
2618record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2619{
a52eab48 2620 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2621 {
2622 struct thread_info *tp = inferior_thread ();
2623
2624 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2625 }
2626
2627 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2628}
2629
2630/* The to_supports_stopped_by_sw_breakpoint method of target
2631 record-btrace. */
2632
2633static int
2634record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2635{
a52eab48 2636 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2637 return 1;
2638
2639 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2640}
2641
/* The to_stopped_by_hw_breakpoint method of target record-btrace. */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      /* While replaying, the recorded per-thread stop reason decides.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2656
2657/* The to_supports_stopped_by_hw_breakpoint method of target
2658 record-btrace. */
2659
2660static int
2661record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2662{
a52eab48 2663 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2664 return 1;
52834460 2665
9e8915c6 2666 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2667}
2668
e8032dde 2669/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2670
2671static void
e8032dde 2672record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2673{
e8032dde 2674 /* We don't add or remove threads during replay. */
a52eab48 2675 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2676 return;
2677
2678 /* Forward the request. */
e75fdfca 2679 ops = ops->beneath;
e8032dde 2680 ops->to_update_thread_list (ops);
e2887aa3
MM
2681}
2682
2683/* The to_thread_alive method of target record-btrace. */
2684
2685static int
2686record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2687{
2688 /* We don't add or remove threads during replay. */
a52eab48 2689 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2690 return find_thread_ptid (ptid) != NULL;
2691
2692 /* Forward the request. */
e75fdfca
TT
2693 ops = ops->beneath;
2694 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2695}
2696
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.
   Clears the record histories, updates STOP_PC, and reprints the current
   frame so the user sees the new position. */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position - nothing to do.  */
	return;

      *btinfo->replay = *it;
      /* The replay position changed; cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position. */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2727
2728/* The to_goto_record_begin method of target record-btrace. */
2729
2730static void
08475817 2731record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2732{
2733 struct thread_info *tp;
2734 struct btrace_insn_iterator begin;
2735
2736 tp = require_btrace_thread ();
2737
2738 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2739
2740 /* Skip gaps at the beginning of the trace. */
2741 while (btrace_insn_get (&begin) == NULL)
2742 {
2743 unsigned int steps;
2744
2745 steps = btrace_insn_next (&begin, 1);
2746 if (steps == 0)
2747 error (_("No trace."));
2748 }
2749
066ce621 2750 record_btrace_set_replay (tp, &begin);
066ce621
MM
2751}
2752
2753/* The to_goto_record_end method of target record-btrace. */
2754
2755static void
307a1b91 2756record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2757{
2758 struct thread_info *tp;
2759
2760 tp = require_btrace_thread ();
2761
2762 record_btrace_set_replay (tp, NULL);
066ce621
MM
2763}
2764
/* The to_goto_record method of target record-btrace.
   INSN is the user-supplied instruction number to jump to. */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  The iterator API takes an unsigned int; if
     the narrowing conversion lost bits, the number is out of range.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap. */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2791
797094dd
MM
/* The to_record_stop_replaying method of target record-btrace. */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  /* Take every live thread out of replay mode.  */
  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}
2802
70ad5bff
MM
/* The to_execution_direction target method. */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  /* Report the direction recorded by the last resume request.  */
  return record_btrace_resume_exec_dir;
}
2810
aef92902
MM
/* The to_prepare_to_generate_core target method. */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  /* While the flag is set, memory accesses go to the live target even
     when replaying, so the core file reflects live memory.  */
  record_btrace_generating_corefile = 1;
}
2818
/* The to_done_generating_core target method. */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  /* Restore normal (replay-aware) memory access behavior.  */
  record_btrace_generating_corefile = 0;
}
2826
afedecd3
MM
/* Initialize the record-btrace target ops.
   Fills in the static RECORD_BTRACE_OPS target vector; methods not set
   here are inherited from the target beneath. */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  /* Lifetime management.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_method = record_btrace_record_method;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  /* Memory, breakpoints, and registers.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_commit_resume = record_btrace_commit_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2887
f4abbc16
MM
/* Start recording in BTS format. */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Select BTS before opening the target; to_open reads this.  */
  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ((char *) "target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Reset the format on failure so a later attempt starts clean.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2909
/* Start recording in Intel Processor Trace format. */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  /* Select PT before opening the target; to_open reads this.  */
  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ((char *) "target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Reset the format on failure so a later attempt starts clean.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2931
b20a6524
MM
/* Alias for "target record".
   Tries Intel Processor Trace first and falls back to BTS if opening the
   target in PT format fails. */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ((char *) "target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed - retry with the BTS format.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ((char *) "target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* Both formats failed; reset and re-raise the BTS error.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2963
67b5c0c1
MM
/* The "set record btrace" command.
   A bare prefix invocation just lists the current sub-option values. */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2971
/* The "show record btrace" command.
   Lists all "record btrace" option values. */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2979
2980/* The "show record btrace replay-memory-access" command. */
2981
2982static void
2983cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2984 struct cmd_list_element *c, const char *value)
2985{
2986 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2987 replay_memory_access);
2988}
2989
d33501a5
MM
/* The "set record btrace bts" command.
   A bare prefix invocation prints usage and the available subcommands. */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
3000
/* The "show record btrace bts" command.
   Lists all "record btrace bts" option values. */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
3008
b20a6524
MM
/* The "set record btrace pt" command.
   A bare prefix invocation prints usage and the available subcommands. */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
3019
/* The "show record btrace pt" command.
   Lists all "record btrace pt" option values. */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3027
/* The "record bts buffer-size" show value function.
   FILE is the stream to print to; VALUE is the current setting. */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3038
/* The "record pt buffer-size" show value function.
   FILE is the stream to print to; VALUE is the current setting. */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3049
afedecd3
MM
/* Provide a prototype to silence -Wmissing-prototypes. */
void _initialize_record_btrace (void);

/* Initialize btrace commands.
   Registers the "record btrace" command family and its set/show options,
   installs the target vector, and sets configuration defaults. */

void
_initialize_record_btrace (void)
{
  /* "record btrace" (and alias "record b") starts recording.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option prefixes.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			NULL, cmd_show_replay_memory_access,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);

  /* BTS-specific options.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* PT-specific options.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for btrace function segments used by the frame unwinder.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes (in bytes).  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}