]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
btrace: Remove constant arguments.
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Defaults to the
   conservative read-only mode.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
afedecd3
MM
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
066ce621
MM
109static struct thread_info *
110require_btrace_thread (void)
afedecd3
MM
111{
112 struct thread_info *tp;
afedecd3
MM
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
cd4007e4
MM
120 validate_registers_access ();
121
afedecd3
MM
122 btrace_fetch (tp);
123
6e07b1d2 124 if (btrace_is_empty (tp))
afedecd3
MM
125 error (_("No trace."));
126
066ce621
MM
127 return tp;
128}
129
130/* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136static struct btrace_thread_info *
137require_btrace (void)
138{
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
afedecd3
MM
144}
145
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer callback; it must not throw, so any
   error from btrace_enable is downgraded to a warning.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
162/* Callback function to disable branch tracing for one thread. */
163
164static void
165record_btrace_disable_callback (void *arg)
166{
19ba03f4 167 struct thread_info *tp = (struct thread_info *) arg;
afedecd3
MM
168
169 btrace_disable (tp);
170}
171
172/* Enable automatic tracing of new threads. */
173
174static void
175record_btrace_auto_enable (void)
176{
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181}
182
183/* Disable automatic tracing of new threads. */
184
185static void
186record_btrace_auto_disable (void)
187{
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196}
197
/* The record-btrace async event handler function.  Forwards the event to
   the generic inferior event handling.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  /* Start tracing new threads before the target is pushed so all threads
     are covered once the target is active.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  /* Notify observers (e.g. MI) that recording started in btrace format.  */
  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}
/* The to_open method of target record-btrace.

   ARGS optionally selects the threads to trace (a thread number list);
   with no arguments, all non-exited threads are traced.  On error, any
   tracing enabled so far is rolled back via the cleanup chain.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Collect one disable-cleanup per enabled thread so a failure part-way
     through leaves no thread traced.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  /* Success: keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
257/* The to_stop_recording method of target record-btrace. */
258
259static void
c6cd7c02 260record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
261{
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
034f788c 268 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271}
272
c0272db5
TW
273/* The to_disconnect method of target record-btrace. */
274
275static void
276record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278{
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286}
287
afedecd3
MM
288/* The to_close method of target record-btrace. */
289
290static void
de90e03d 291record_btrace_close (struct target_ops *self)
afedecd3 292{
568e808b
MM
293 struct thread_info *tp;
294
70ad5bff
MM
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
99c819ee
MM
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
568e808b
MM
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
034f788c 304 ALL_NON_EXITED_THREADS (tp)
568e808b 305 btrace_teardown (tp);
afedecd3
MM
306}
307
b7d2e916
PA
308/* The to_async method of target record-btrace. */
309
310static void
6a3753b3 311record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 312{
6a3753b3 313 if (enable)
b7d2e916
PA
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
6a3753b3 318 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
319}
320
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1 GiB, 1 MiB, or 1 KiB (checked in
   that order), scale it down and return the matching suffix; otherwise
   leave *SIZE untouched and return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  unsigned int value = *size;
  int i;

  for (i = 0; i < 3; ++i)
    if ((value & ((1u << units[i].shift) - 1)) == 0)
      {
	*size = value >> units[i].shift;
	return units[i].suffix;
      }

  return "";
}
349/* Print a BTS configuration. */
350
351static void
352record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353{
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363}
364
bc504a31 365/* Print an Intel Processor Trace configuration. */
b20a6524
MM
366
367static void
368record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369{
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379}
380
d33501a5
MM
381/* Print a branch tracing configuration. */
382
383static void
384record_btrace_print_conf (const struct btrace_config *conf)
385{
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
b20a6524
MM
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
d33501a5
MM
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
404}
405
/* The to_info_record method of target record-btrace.

   Prints the recording configuration and a summary of the recorded
   instructions, functions, and trace gaps for the current thread.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Bring the trace up to date before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the one-before-end call is the call count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
31fd9caa
MM
465/* Print a decode error. */
466
467static void
468btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470{
508352a9 471 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 472
112e8700 473 uiout->text (_("["));
508352a9
TW
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 476 {
112e8700
SM
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
31fd9caa 480 }
112e8700
SM
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
31fd9caa
MM
483}
484
/* Print an unsigned int to UIOUT under field name FLD.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}
/* A range of source lines.  An empty range is represented by
   END <= BEGIN (see btrace_line_range_is_empty).  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
507/* Construct a line range. */
508
509static struct btrace_line_range
510btrace_mk_line_range (struct symtab *symtab, int begin, int end)
511{
512 struct btrace_line_range range;
513
514 range.symtab = symtab;
515 range.begin = begin;
516 range.end = end;
517
518 return range;
519}
520
521/* Add a line to a line range. */
522
523static struct btrace_line_range
524btrace_line_range_add (struct btrace_line_range range, int line)
525{
526 if (range.end <= range.begin)
527 {
528 /* This is the first entry. */
529 range.begin = line;
530 range.end = line + 1;
531 }
532 else if (line < range.begin)
533 range.begin = line;
534 else if (range.end < line)
535 range.end = line;
536
537 return range;
538}
539
540/* Return non-zero if RANGE is empty, zero otherwise. */
541
542static int
543btrace_line_range_is_empty (struct btrace_line_range range)
544{
545 return range.end <= range.begin;
546}
547
548/* Return non-zero if LHS contains RHS, zero otherwise. */
549
550static int
551btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
553{
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
557}
558
/* Find the line range associated with PC.  Collects every line table
   entry whose address is exactly PC.  Returns an empty range if PC has
   no symtab, no line table, or no matching entries.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the loop deliberately stops before the last entry;
     presumably the final line table entry is an end-of-sequence marker
     rather than real code -- confirm against the linetable docs.  */
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      /* Close the tuple of the previous source line, if any.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Open the list that will receive this line's instructions; it is
	 intentionally left open (see function comment).  */
      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
/* Disassemble a section of the recorded instruction trace.

   Prints instructions from BEGIN (inclusive) to END (exclusive),
   interleaving source lines when DISASSEMBLY_SOURCE is set in FLAGS and
   rendering decode errors for trace gaps.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print source lines only when entering a line range we
		 have not already printed.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }

  do_cleanups (cleanups);
}
/* The to_insn_history method of target record-btrace.

   Shows the next/previous |SIZE| instructions relative to the previous
   request, or, on the first request, around the replay position (when
   replaying) or the tail of the trace.  SIZE < 0 moves backwards.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from the previous request in the requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next relative request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
}
803/* The to_insn_history_range method of target record-btrace. */
804
805static void
4e99c6b7
TT
806record_btrace_insn_history_range (struct target_ops *self,
807 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
808{
809 struct btrace_thread_info *btinfo;
23a7fe75
MM
810 struct btrace_insn_history *history;
811 struct btrace_insn_iterator begin, end;
afedecd3 812 struct ui_out *uiout;
23a7fe75
MM
813 unsigned int low, high;
814 int found;
afedecd3
MM
815
816 uiout = current_uiout;
2e783024 817 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
23a7fe75
MM
818 low = from;
819 high = to;
afedecd3 820
23a7fe75 821 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
822
823 /* Check for wrap-arounds. */
23a7fe75 824 if (low != from || high != to)
afedecd3
MM
825 error (_("Bad range."));
826
0688d04e 827 if (high < low)
afedecd3
MM
828 error (_("Bad range."));
829
23a7fe75 830 btinfo = require_btrace ();
afedecd3 831
23a7fe75
MM
832 found = btrace_find_insn_by_number (&begin, btinfo, low);
833 if (found == 0)
834 error (_("Range out of bounds."));
afedecd3 835
23a7fe75
MM
836 found = btrace_find_insn_by_number (&end, btinfo, high);
837 if (found == 0)
0688d04e
MM
838 {
839 /* Silently truncate the range. */
840 btrace_insn_end (&end, btinfo);
841 }
842 else
843 {
844 /* We want both begin and end to be inclusive. */
845 btrace_insn_next (&end, 1);
846 }
afedecd3 847
31fd9caa 848 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 849 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
850}
851
852/* The to_insn_history_from method of target record-btrace. */
853
854static void
9abc3ff3
TT
855record_btrace_insn_history_from (struct target_ops *self,
856 ULONGEST from, int size, int flags)
afedecd3
MM
857{
858 ULONGEST begin, end, context;
859
860 context = abs (size);
0688d04e
MM
861 if (context == 0)
862 error (_("Bad record instruction-history-size."));
afedecd3
MM
863
864 if (size < 0)
865 {
866 end = from;
867
868 if (from < context)
869 begin = 0;
870 else
0688d04e 871 begin = from - context + 1;
afedecd3
MM
872 }
873 else
874 {
875 begin = from;
0688d04e 876 end = from + context - 1;
afedecd3
MM
877
878 /* Check for wrap-around. */
879 if (end < begin)
880 end = ULONGEST_MAX;
881 }
882
4e99c6b7 883 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
884}
885
886/* Print the instruction number range for a function call history line. */
887
888static void
23a7fe75
MM
889btrace_call_history_insn_range (struct ui_out *uiout,
890 const struct btrace_function *bfun)
afedecd3 891{
7acbe133
MM
892 unsigned int begin, end, size;
893
894 size = VEC_length (btrace_insn_s, bfun->insn);
895 gdb_assert (size > 0);
afedecd3 896
23a7fe75 897 begin = bfun->insn_offset;
7acbe133 898 end = begin + size - 1;
afedecd3 899
23a7fe75 900 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 901 uiout->text (",");
23a7fe75 902 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
903}
904
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   If no instruction maps to BFUN's symtab, *PBEGIN is INT_MAX and *PEND
   is INT_MIN, so callers can detect the empty case via *PEND < *PBEGIN.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip instructions from other symtabs or without line info.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
afedecd3
MM
946/* Print the source line information for a function call history line. */
947
948static void
23a7fe75
MM
949btrace_call_history_src_line (struct ui_out *uiout,
950 const struct btrace_function *bfun)
afedecd3
MM
951{
952 struct symbol *sym;
23a7fe75 953 int begin, end;
afedecd3
MM
954
955 sym = bfun->sym;
956 if (sym == NULL)
957 return;
958
112e8700 959 uiout->field_string ("file",
08be3fe3 960 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 961
ce0dfbea 962 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 963 if (end < begin)
afedecd3
MM
964 return;
965
112e8700
SM
966 uiout->text (":");
967 uiout->field_int ("min line", begin);
afedecd3 968
23a7fe75 969 if (end == begin)
afedecd3
MM
970 return;
971
112e8700
SM
972 uiout->text (",");
973 uiout->field_int ("max line", end);
afedecd3
MM
974}
975
0b722aec
MM
976/* Get the name of a branch trace function. */
977
978static const char *
979btrace_get_bfun_name (const struct btrace_function *bfun)
980{
981 struct minimal_symbol *msym;
982 struct symbol *sym;
983
984 if (bfun == NULL)
985 return "??";
986
987 msym = bfun->msym;
988 sym = bfun->sym;
989
990 if (sym != NULL)
991 return SYMBOL_PRINT_NAME (sym);
992 else if (msym != NULL)
efd66ac6 993 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
994 else
995 return "??";
996}
997
/* Disassemble a section of the recorded function trace.

   Prints one line per function call from BEGIN (inclusive) to END
   (exclusive): index, optional call-depth indentation, name, and optional
   instruction range and source line info depending on INT_FLAGS.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth when requested.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
/* The to_call_history method of target record-btrace.

   Print SIZE function call segments of the recorded history.  A negative
   SIZE moves backwards (towards older trace); a positive SIZE moves
   forwards.  The browsing position is remembered in the thread's
   btrace_call_history so repeated commands continue where the last one
   stopped.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this is the
     call history; looks like a copy/paste from the insn variant.  Renaming
     would change MI output, so it is left untouched — confirm against MI
     consumers before changing.  */
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* Continue from where the previous history command stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* Nothing left to print in the requested direction.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the new browsing position for the next command.  */
  btrace_set_call_history (btinfo, &begin, &end);
}
1159
1160/* The to_call_history_range method of target record-btrace. */
1161
1162static void
f0d960ea 1163record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1164 ULONGEST from, ULONGEST to,
1165 int int_flags)
afedecd3
MM
1166{
1167 struct btrace_thread_info *btinfo;
23a7fe75
MM
1168 struct btrace_call_history *history;
1169 struct btrace_call_iterator begin, end;
afedecd3 1170 struct ui_out *uiout;
23a7fe75
MM
1171 unsigned int low, high;
1172 int found;
8d297bbf 1173 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1174
1175 uiout = current_uiout;
2e783024 1176 ui_out_emit_tuple tuple_emitter (uiout, "func history");
23a7fe75
MM
1177 low = from;
1178 high = to;
afedecd3 1179
8d297bbf 1180 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
afedecd3
MM
1181
1182 /* Check for wrap-arounds. */
23a7fe75 1183 if (low != from || high != to)
afedecd3
MM
1184 error (_("Bad range."));
1185
0688d04e 1186 if (high < low)
afedecd3
MM
1187 error (_("Bad range."));
1188
23a7fe75 1189 btinfo = require_btrace ();
afedecd3 1190
23a7fe75
MM
1191 found = btrace_find_call_by_number (&begin, btinfo, low);
1192 if (found == 0)
1193 error (_("Range out of bounds."));
afedecd3 1194
23a7fe75
MM
1195 found = btrace_find_call_by_number (&end, btinfo, high);
1196 if (found == 0)
0688d04e
MM
1197 {
1198 /* Silently truncate the range. */
1199 btrace_call_end (&end, btinfo);
1200 }
1201 else
1202 {
1203 /* We want both begin and end to be inclusive. */
1204 btrace_call_next (&end, 1);
1205 }
afedecd3 1206
8710b709 1207 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1208 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1209}
1210
1211/* The to_call_history_from method of target record-btrace. */
1212
1213static void
ec0aea04 1214record_btrace_call_history_from (struct target_ops *self,
8d297bbf
PA
1215 ULONGEST from, int size,
1216 int int_flags)
afedecd3
MM
1217{
1218 ULONGEST begin, end, context;
8d297bbf 1219 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1220
1221 context = abs (size);
0688d04e
MM
1222 if (context == 0)
1223 error (_("Bad record function-call-history-size."));
afedecd3
MM
1224
1225 if (size < 0)
1226 {
1227 end = from;
1228
1229 if (from < context)
1230 begin = 0;
1231 else
0688d04e 1232 begin = from - context + 1;
afedecd3
MM
1233 }
1234 else
1235 {
1236 begin = from;
0688d04e 1237 end = from + context - 1;
afedecd3
MM
1238
1239 /* Check for wrap-around. */
1240 if (end < begin)
1241 end = ULONGEST_MAX;
1242 }
1243
f0d960ea 1244 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1245}
1246
b158a20f
TW
1247/* The to_record_method method of target record-btrace. */
1248
1249static enum record_method
1250record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1251{
1252 const struct btrace_config *config;
1253 struct thread_info * const tp = find_thread_ptid (ptid);
1254
1255 if (tp == NULL)
1256 error (_("No thread."));
1257
1258 if (tp->btrace.target == NULL)
1259 return RECORD_METHOD_NONE;
1260
1261 return RECORD_METHOD_BTRACE;
1262}
1263
07bbe694
MM
1264/* The to_record_is_replaying method of target record-btrace. */
1265
1266static int
a52eab48 1267record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1268{
1269 struct thread_info *tp;
1270
034f788c 1271 ALL_NON_EXITED_THREADS (tp)
a52eab48 1272 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1273 return 1;
1274
1275 return 0;
1276}
1277
7ff27e9b
MM
1278/* The to_record_will_replay method of target record-btrace. */
1279
1280static int
1281record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1282{
1283 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1284}
1285
633785ff
MM
1286/* The to_xfer_partial method of target record-btrace. */
1287
9b409511 1288static enum target_xfer_status
633785ff
MM
1289record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1290 const char *annex, gdb_byte *readbuf,
1291 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1292 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1293{
1294 struct target_ops *t;
1295
1296 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1297 if (replay_memory_access == replay_memory_access_read_only
aef92902 1298 && !record_btrace_generating_corefile
4d10e986 1299 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1300 {
1301 switch (object)
1302 {
1303 case TARGET_OBJECT_MEMORY:
1304 {
1305 struct target_section *section;
1306
1307 /* We do not allow writing memory in general. */
1308 if (writebuf != NULL)
9b409511
YQ
1309 {
1310 *xfered_len = len;
bc113b4e 1311 return TARGET_XFER_UNAVAILABLE;
9b409511 1312 }
633785ff
MM
1313
1314 /* We allow reading readonly memory. */
1315 section = target_section_by_addr (ops, offset);
1316 if (section != NULL)
1317 {
1318 /* Check if the section we found is readonly. */
1319 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1320 section->the_bfd_section)
1321 & SEC_READONLY) != 0)
1322 {
1323 /* Truncate the request to fit into this section. */
325fac50 1324 len = std::min (len, section->endaddr - offset);
633785ff
MM
1325 break;
1326 }
1327 }
1328
9b409511 1329 *xfered_len = len;
bc113b4e 1330 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1331 }
1332 }
1333 }
1334
1335 /* Forward the request. */
e75fdfca
TT
1336 ops = ops->beneath;
1337 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1338 offset, len, xfered_len);
633785ff
MM
1339}
1340
/* The to_insert_breakpoint method of target record-btrace.

   Inserting a breakpoint writes to inferior memory, which is normally
   forbidden while replaying; temporarily lift the restriction for the
   duration of the call to the target beneath.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the setting before re-throwing; there is no automatic
	 cleanup across TRY/CATCH.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1371
/* The to_remove_breakpoint method of target record-btrace.

   Removing a breakpoint writes to inferior memory, which is normally
   forbidden while replaying; temporarily lift the restriction for the
   duration of the call to the target beneath.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the setting before re-throwing; there is no automatic
	 cleanup across TRY/CATCH.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1404
1f3ef581
MM
1405/* The to_fetch_registers method of target record-btrace. */
1406
1407static void
1408record_btrace_fetch_registers (struct target_ops *ops,
1409 struct regcache *regcache, int regno)
1410{
1411 struct btrace_insn_iterator *replay;
1412 struct thread_info *tp;
1413
bcc0c096 1414 tp = find_thread_ptid (regcache_get_ptid (regcache));
1f3ef581
MM
1415 gdb_assert (tp != NULL);
1416
1417 replay = tp->btrace.replay;
aef92902 1418 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1419 {
1420 const struct btrace_insn *insn;
1421 struct gdbarch *gdbarch;
1422 int pcreg;
1423
1424 gdbarch = get_regcache_arch (regcache);
1425 pcreg = gdbarch_pc_regnum (gdbarch);
1426 if (pcreg < 0)
1427 return;
1428
1429 /* We can only provide the PC register. */
1430 if (regno >= 0 && regno != pcreg)
1431 return;
1432
1433 insn = btrace_insn_get (replay);
1434 gdb_assert (insn != NULL);
1435
1436 regcache_raw_supply (regcache, regno, &insn->pc);
1437 }
1438 else
1439 {
e75fdfca 1440 struct target_ops *t = ops->beneath;
1f3ef581 1441
e75fdfca 1442 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1443 }
1444}
1445
1446/* The to_store_registers method of target record-btrace. */
1447
1448static void
1449record_btrace_store_registers (struct target_ops *ops,
1450 struct regcache *regcache, int regno)
1451{
1452 struct target_ops *t;
1453
a52eab48 1454 if (!record_btrace_generating_corefile
bcc0c096 1455 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
4d10e986 1456 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1457
1458 gdb_assert (may_write_registers != 0);
1459
e75fdfca
TT
1460 t = ops->beneath;
1461 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1462}
1463
1464/* The to_prepare_to_store method of target record-btrace. */
1465
1466static void
1467record_btrace_prepare_to_store (struct target_ops *ops,
1468 struct regcache *regcache)
1469{
1470 struct target_ops *t;
1471
a52eab48 1472 if (!record_btrace_generating_corefile
bcc0c096 1473 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1f3ef581
MM
1474 return;
1475
e75fdfca
TT
1476 t = ops->beneath;
1477 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1478}
1479
/* The branch trace frame cache.

   One entry per frame created by the btrace unwinders; associates a
   frame with the branch trace function segment it represents.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Also the hash key of the cache (see bfcache_hash).  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1497
1498/* hash_f for htab_create_alloc of bfcache. */
1499
1500static hashval_t
1501bfcache_hash (const void *arg)
1502{
19ba03f4
SM
1503 const struct btrace_frame_cache *cache
1504 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1505
1506 return htab_hash_pointer (cache->frame);
1507}
1508
1509/* eq_f for htab_create_alloc of bfcache. */
1510
1511static int
1512bfcache_eq (const void *arg1, const void *arg2)
1513{
19ba03f4
SM
1514 const struct btrace_frame_cache *cache1
1515 = (const struct btrace_frame_cache *) arg1;
1516 const struct btrace_frame_cache *cache2
1517 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1518
1519 return cache1->frame == cache2->frame;
1520}
1521
1522/* Create a new btrace frame cache. */
1523
1524static struct btrace_frame_cache *
1525bfcache_new (struct frame_info *frame)
1526{
1527 struct btrace_frame_cache *cache;
1528 void **slot;
1529
1530 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1531 cache->frame = frame;
1532
1533 slot = htab_find_slot (bfcache, cache, INSERT);
1534 gdb_assert (*slot == NULL);
1535 *slot = cache;
1536
1537 return cache;
1538}
1539
1540/* Extract the branch trace function from a branch trace frame. */
1541
1542static const struct btrace_function *
1543btrace_get_frame_function (struct frame_info *frame)
1544{
1545 const struct btrace_frame_cache *cache;
1546 const struct btrace_function *bfun;
1547 struct btrace_frame_cache pattern;
1548 void **slot;
1549
1550 pattern.frame = frame;
1551
1552 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1553 if (slot == NULL)
1554 return NULL;
1555
19ba03f4 1556 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1557 return cache->bfun;
1558}
1559
cecac1ab
MM
1560/* Implement stop_reason method for record_btrace_frame_unwind. */
1561
1562static enum unwind_stop_reason
1563record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1564 void **this_cache)
1565{
0b722aec
MM
1566 const struct btrace_frame_cache *cache;
1567 const struct btrace_function *bfun;
1568
19ba03f4 1569 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1570 bfun = cache->bfun;
1571 gdb_assert (bfun != NULL);
1572
1573 if (bfun->up == NULL)
1574 return UNWIND_UNAVAILABLE;
1575
1576 return UNWIND_NO_REASON;
cecac1ab
MM
1577}
1578
/* Implement this_id method for record_btrace_frame_unwind.

   Build an unavailable-stack frame id from the frame's function start
   address and the number of the first segment of the traced function
   (so all segments of one function instance share the same id).  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function instance.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1607
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from the recorded trace.  For a caller
   reached via a return, the PC is the caller's first recorded
   instruction; otherwise it is the instruction after the caller's last
   recorded one (i.e. the return address).  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The caller resumes at its first recorded instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The caller resumes after its last recorded instruction - the
	 call site.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1656
/* Implement sniffer method for record_btrace_frame_unwind.

   Claim the frame if it corresponds to a function segment in the
   recorded trace: the innermost frame maps to the current replay
   position; any other frame maps to the caller of the (already
   sniffed) frame below it.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->btinfo->functions[replay->call_index];
    }
  else
    {
      /* Outer frame: take the caller of the frame below, unless that
	 link represents a tail call (handled by the tailcall sniffer).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1706
1707/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1708
1709static int
1710record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1711 struct frame_info *this_frame,
1712 void **this_cache)
1713{
1714 const struct btrace_function *bfun, *callee;
1715 struct btrace_frame_cache *cache;
1716 struct frame_info *next;
1717
1718 next = get_next_frame (this_frame);
1719 if (next == NULL)
1720 return 0;
1721
1722 callee = btrace_get_frame_function (next);
1723 if (callee == NULL)
1724 return 0;
1725
1726 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1727 return 0;
1728
1729 bfun = callee->up;
1730 if (bfun == NULL)
1731 return 0;
1732
1733 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1734 btrace_get_bfun_name (bfun), bfun->level);
1735
1736 /* This is our frame. Initialize the frame cache. */
1737 cache = bfcache_new (this_frame);
1738 cache->tp = find_thread_ptid (inferior_ptid);
1739 cache->bfun = bfun;
1740
1741 *this_cache = cache;
1742 return 1;
1743}
1744
1745static void
1746record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1747{
1748 struct btrace_frame_cache *cache;
1749 void **slot;
1750
19ba03f4 1751 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1752
1753 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1754 gdb_assert (slot != NULL);
1755
1756 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1757}
1758
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,				/* type */
  record_btrace_frame_unwind_stop_reason, /* stop_reason */
  record_btrace_frame_this_id,		/* this_id */
  record_btrace_frame_prev_register,	/* prev_register */
  NULL,					/* unwind_data */
  record_btrace_frame_sniffer,		/* sniffer */
  record_btrace_frame_dealloc_cache	/* dealloc_cache */
};

/* Like record_btrace_frame_unwind, but for frames reached via a tail
   call recorded in the trace.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,			/* type */
  record_btrace_frame_unwind_stop_reason, /* stop_reason */
  record_btrace_frame_this_id,		/* this_id */
  record_btrace_frame_prev_register,	/* prev_register */
  NULL,					/* unwind_data */
  record_btrace_tailcall_frame_sniffer,	/* sniffer */
  record_btrace_frame_dealloc_cache	/* dealloc_cache */
};
b2f4cfde 1786
ac01945b
TT
1787/* Implement the to_get_unwinder method. */
1788
1789static const struct frame_unwind *
1790record_btrace_to_get_unwinder (struct target_ops *self)
1791{
1792 return &record_btrace_frame_unwind;
1793}
1794
/* Implement the to_get_tailcall_unwinder method.

   Return the btrace unwinder for tail-call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1802
987e68b1
MM
1803/* Return a human-readable string for FLAG. */
1804
1805static const char *
1806btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1807{
1808 switch (flag)
1809 {
1810 case BTHR_STEP:
1811 return "step";
1812
1813 case BTHR_RSTEP:
1814 return "reverse-step";
1815
1816 case BTHR_CONT:
1817 return "cont";
1818
1819 case BTHR_RCONT:
1820 return "reverse-cont";
1821
1822 case BTHR_STOP:
1823 return "stop";
1824 }
1825
1826 return "<invalid>";
1827}
1828
52834460
MM
1829/* Indicate that TP should be resumed according to FLAG. */
1830
1831static void
1832record_btrace_resume_thread (struct thread_info *tp,
1833 enum btrace_thread_flag flag)
1834{
1835 struct btrace_thread_info *btinfo;
1836
43792cf0 1837 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1838 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1839
1840 btinfo = &tp->btrace;
1841
52834460
MM
1842 /* Fetch the latest branch trace. */
1843 btrace_fetch (tp);
1844
0ca912df
MM
1845 /* A resume request overwrites a preceding resume or stop request. */
1846 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1847 btinfo->flags |= flag;
1848}
1849
/* Get the current frame for TP.

   Temporarily switches INFERIOR_PTID to TP and clears its executing
   flag so the frame machinery is willing to compute a frame; both are
   restored on all paths, including on exception.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1898
/* Start replaying a thread.

   Create the thread's replay iterator positioned at the end of its
   trace (skipping trailing gaps) and fix up infrun's stored stepping
   frame ids, which are computed differently while replaying.  Returns
   the new iterator, or NULL if the thread has no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Undo the partial setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
1980
1981/* Stop replaying a thread. */
1982
1983static void
1984record_btrace_stop_replaying (struct thread_info *tp)
1985{
1986 struct btrace_thread_info *btinfo;
1987
1988 btinfo = &tp->btrace;
1989
1990 xfree (btinfo->replay);
1991 btinfo->replay = NULL;
1992
1993 /* Make sure we're not leaving any stale registers. */
1994 registers_changed_ptid (tp->ptid);
1995}
1996
e3cfc1c7
MM
1997/* Stop replaying TP if it is at the end of its execution history. */
1998
1999static void
2000record_btrace_stop_replaying_at_end (struct thread_info *tp)
2001{
2002 struct btrace_insn_iterator *replay, end;
2003 struct btrace_thread_info *btinfo;
2004
2005 btinfo = &tp->btrace;
2006 replay = btinfo->replay;
2007
2008 if (replay == NULL)
2009 return;
2010
2011 btrace_insn_end (&end, btinfo);
2012
2013 if (btrace_insn_cmp (replay, &end) == 0)
2014 record_btrace_stop_replaying (tp);
2015}
2016
/* The to_resume method of target record-btrace.

   When not replaying (and moving forwards), forward the request to the
   target beneath.  Otherwise record the resume intent in each matching
   thread's btrace flags; the actual stepping happens in
   record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2092
85ad3aaf
PA
2093/* The to_commit_resume method of target record-btrace. */
2094
2095static void
2096record_btrace_commit_resume (struct target_ops *ops)
2097{
2098 if ((execution_direction != EXEC_REVERSE)
2099 && !record_btrace_is_replaying (ops, minus_one_ptid))
2100 ops->beneath->to_commit_resume (ops->beneath);
2101}
2102
987e68b1
MM
2103/* Cancel resuming TP. */
2104
2105static void
2106record_btrace_cancel_resume (struct thread_info *tp)
2107{
2108 enum btrace_thread_flag flags;
2109
2110 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2111 if (flags == 0)
2112 return;
2113
43792cf0
PA
2114 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2115 print_thread_id (tp),
987e68b1
MM
2116 target_pid_to_str (tp->ptid), flags,
2117 btrace_thread_flag_to_str (flags));
2118
2119 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2120 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2121}
2122
2123/* Return a target_waitstatus indicating that we ran out of history. */
2124
2125static struct target_waitstatus
2126btrace_step_no_history (void)
2127{
2128 struct target_waitstatus status;
2129
2130 status.kind = TARGET_WAITKIND_NO_HISTORY;
2131
2132 return status;
2133}
2134
2135/* Return a target_waitstatus indicating that a step finished. */
2136
2137static struct target_waitstatus
2138btrace_step_stopped (void)
2139{
2140 struct target_waitstatus status;
2141
2142 status.kind = TARGET_WAITKIND_STOPPED;
2143 status.value.sig = GDB_SIGNAL_TRAP;
2144
2145 return status;
2146}
2147
6e4879f0
MM
2148/* Return a target_waitstatus indicating that a thread was stopped as
2149 requested. */
2150
2151static struct target_waitstatus
2152btrace_step_stopped_on_request (void)
2153{
2154 struct target_waitstatus status;
2155
2156 status.kind = TARGET_WAITKIND_STOPPED;
2157 status.value.sig = GDB_SIGNAL_0;
2158
2159 return status;
2160}
2161
d825d248
MM
2162/* Return a target_waitstatus indicating a spurious stop. */
2163
2164static struct target_waitstatus
2165btrace_step_spurious (void)
2166{
2167 struct target_waitstatus status;
2168
2169 status.kind = TARGET_WAITKIND_SPURIOUS;
2170
2171 return status;
2172}
2173
e3cfc1c7
MM
2174/* Return a target_waitstatus indicating that the thread was not resumed. */
2175
2176static struct target_waitstatus
2177btrace_step_no_resumed (void)
2178{
2179 struct target_waitstatus status;
2180
2181 status.kind = TARGET_WAITKIND_NO_RESUMED;
2182
2183 return status;
2184}
2185
2186/* Return a target_waitstatus indicating that we should wait again. */
2187
2188static struct target_waitstatus
2189btrace_step_again (void)
2190{
2191 struct target_waitstatus status;
2192
2193 status.kind = TARGET_WAITKIND_IGNORE;
2194
2195 return status;
2196}
2197
52834460
MM
2198/* Clear the record histories. */
2199
2200static void
2201record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2202{
2203 xfree (btinfo->insn_history);
2204 xfree (btinfo->call_history);
2205
2206 btinfo->insn_history = NULL;
2207 btinfo->call_history = NULL;
2208}
2209
/* Check whether TP's current replay position is at a breakpoint.

   Returns non-zero if TP is replaying and the replayed instruction's PC
   hits a breakpoint in TP's inferior; the stop reason is recorded in
   TP's btrace info.  Returns zero if TP is not replaying, the replay
   iterator is at a gap, or the inferior cannot be found.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Not replaying means we're at the live position; nothing to check.  */
  if (replay == NULL)
    return 0;

  /* A NULL instruction indicates a gap in the trace.  */
  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  inf = find_inferior_ptid (tp->ptid);
  if (inf == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
					     &btinfo->stop_reason);
}
2237
/* Step one instruction in forward direction.

   Returns a "no history" status if TP is not replaying or has reached the
   end of its execution history, a "stopped" status if the step hits a
   breakpoint, and a "spurious" status if the step simply completed.
   On "no history" the replay position is left unchanged.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  If we end up at a gap (at the end of the trace),
     jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Restore the replay position saved above.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2286
/* Step one instruction in backward direction.

   Starts replaying if TP is not replaying yet.  Returns a "no history"
   status when the beginning of the trace is reached, a "stopped" status
   when the de-executed instruction is a breakpoint, and a "spurious"
   status otherwise.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, start;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  If we end up at a gap (at the beginning of
     the trace), jump back to the instruction at which we started.  */
  start = *replay;
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	{
	  /* Restore the replay position saved above.  */
	  *replay = start;
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2333
/* Step a single thread.

   Consumes TP's pending move/stop flags and performs one forward or
   backward step (or a stop) accordingly.  For continue requests
   (BTHR_CONT/BTHR_RCONT) that stepped successfully, the flags are
   re-armed and an "again" status is returned so the caller keeps
   iterating.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the request; it is re-set below where stepping continues.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* A spurious step means the single step completed normally.  */
      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      /* Keep continuing: re-arm the request and ask to be called again.  */
      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2402
/* A vector of threads.  */

/* Pointer typedef so thread_info pointers can be stored in a VEC.  */
typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2407
a6b5be76
MM
2408/* Announce further events if necessary. */
2409
2410static void
2411record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2412 const VEC (tp_t) *no_history)
2413{
2414 int more_moving, more_no_history;
2415
2416 more_moving = !VEC_empty (tp_t, moving);
2417 more_no_history = !VEC_empty (tp_t, no_history);
2418
2419 if (!more_moving && !more_no_history)
2420 return;
2421
2422 if (more_moving)
2423 DEBUG ("movers pending");
2424
2425 if (more_no_history)
2426 DEBUG ("no-history pending");
2427
2428 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2429}
2430
/* The to_wait method of target record-btrace.

   While not replaying, forwards to the target beneath.  During replay,
   steps all matching resumed threads round-robin until one of them
   reports an event, delaying end-of-history reports until no other event
   is available.  Returns the ptid of the eventing thread and fills in
   STATUS.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  /* No thread matched with a pending request: nothing was resumed.  */
  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread wants another step; keep it in the work list.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Move it to the delayed-report list.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      /* A reportable event; this thread wins.  */
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2565
/* The to_stop method of target record-btrace.

   While not replaying, forwards the stop request to the target beneath.
   During replay, converts each matching thread's pending move request
   into a stop request; the actual stop is reported by to_wait.  */

static void
record_btrace_stop (struct target_ops *ops, ptid_t ptid)
{
  DEBUG ("stop %s", target_pid_to_str (ptid));

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_stop (ops, ptid);
    }
  else
    {
      struct thread_info *tp;

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    tp->btrace.flags &= ~BTHR_MOVE;
	    tp->btrace.flags |= BTHR_STOP;
	  }
    }
}
2592
/* The to_can_execute_reverse method of target record-btrace.

   Reverse execution is always possible: we replay from the recorded
   branch trace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2600
/* The to_stopped_by_sw_breakpoint method of target record-btrace.

   During replay, answer from the recorded stop reason of the current
   thread; otherwise defer to the target beneath.  */

static int
record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
}
2615
/* The to_supports_stopped_by_sw_breakpoint method of target
   record-btrace.

   Always supported during replay; otherwise ask the target beneath.  */

static int
record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return 1;

  return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
}
2627
/* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (The original comment said "sw"; the function implements the HW
   breakpoint query.)

   During replay, answer from the recorded stop reason of the current
   thread; otherwise defer to the target beneath.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2642
/* The to_supports_stopped_by_hw_breakpoint method of target
   record-btrace.

   Always supported during replay; otherwise ask the target beneath.  */

static int
record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return 1;

  return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
}
2654
/* The to_update_thread_list method of target record-btrace.  */

static void
record_btrace_update_thread_list (struct target_ops *ops)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return;

  /* Forward the request.  */
  ops = ops->beneath;
  ops->to_update_thread_list (ops);
}
2668
/* The to_thread_alive method of target record-btrace.

   During replay a thread is alive iff it is in our thread list;
   otherwise the target beneath decides.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_thread_alive (ops, ptid);
}
2682
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Clears the record histories so they are rebuilt from the new position,
   updates the cached stop PC, and prints the new frame.  A no-op when IT
   already equals the current replay position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      *btinfo->replay = *it;
      /* The replay position changed, so cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2713
/* The to_goto_record_begin method of target record-btrace.

   Moves the replay position to the first real instruction of the trace,
   skipping leading gaps.  Errors out if the trace contains nothing but
   gaps.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);

  /* Skip gaps at the beginning of the trace.  */
  while (btrace_insn_get (&begin) == NULL)
    {
      unsigned int steps;

      steps = btrace_insn_next (&begin, 1);
      if (steps == 0)
	error (_("No trace."));
    }

  record_btrace_set_replay (tp, &begin);
}
2738
/* The to_goto_record_end method of target record-btrace.

   A NULL iterator means "stop replaying", i.e. return to the live
   position at the end of the trace.  */

static void
record_btrace_goto_end (struct target_ops *ops)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);
}
2750
/* The to_goto_record method of target record-btrace.

   Moves the replay position to instruction number INSN.  Errors out if
   INSN does not fit in an unsigned int, cannot be found, or denotes a
   gap in the trace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);

  /* Check if the instruction could not be found or is a gap.  */
  if (found == 0 || btrace_insn_get (&it) == NULL)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);
}
2777
/* The to_record_stop_replaying method of target record-btrace.

   Stops replaying for every non-exited thread.  */

static void
record_btrace_stop_replaying_all (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    record_btrace_stop_replaying (tp);
}
2788
/* The to_execution_direction target method.

   Reports the direction of the last resume request.  */

static enum exec_direction_kind
record_btrace_execution_direction (struct target_ops *self)
{
  return record_btrace_resume_exec_dir;
}
2796
/* The to_prepare_to_generate_core target method.

   Sets a flag so memory accesses bypass replay restrictions while a core
   file is being generated.  */

static void
record_btrace_prepare_to_generate_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 1;
}
2804
/* The to_done_generating_core target method.

   Clears the flag set by record_btrace_prepare_to_generate_core.  */

static void
record_btrace_done_generating_core (struct target_ops *self)
{
  record_btrace_generating_corefile = 0;
}
2812
/* Initialize the record-btrace target ops.

   Fills in the record_btrace_ops target vector with the methods defined
   in this file plus the generic record_* helpers.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Target lifetime.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_btrace_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;

  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_method = record_btrace_record_method;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;

  /* Memory, breakpoints and registers.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_commit_resume = record_btrace_commit_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2873
/* Start recording in BTS format.

   Sets the recording format to BTS before pushing the target; on failure
   the format is reset to NONE and the exception is re-thrown.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ((char *) "target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2895
/* Start recording in Intel Processor Trace format.

   Sets the recording format to PT before pushing the target; on failure
   the format is reset to NONE and the exception is re-thrown.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ((char *) "target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2917
/* Alias for "target record".

   Tries the PT format first and falls back to BTS if starting in PT
   format fails.  If both fail, the format is reset to NONE and the BTS
   exception is re-thrown.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ((char *) "target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed; retry with the BTS format.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ((char *) "target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2949
/* The "set record btrace" command.

   With no subcommand, show the current values of all subcommands.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2957
/* The "show record btrace" command.

   Shows the current values of all "record btrace" options.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2965
2966/* The "show record btrace replay-memory-access" command. */
2967
2968static void
2969cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2970 struct cmd_list_element *c, const char *value)
2971{
2972 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2973 replay_memory_access);
2974}
2975
/* The "set record btrace bts" command.

   Requires a subcommand; print the subcommand help otherwise.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2986
/* The "show record btrace bts" command.

   Shows the current values of all "record btrace bts" options.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2994
/* The "set record btrace pt" command.

   Requires a subcommand; print the subcommand help otherwise.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
3005
/* The "show record btrace pt" command.

   Shows the current values of all "record btrace pt" options.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
3013
/* The "record bts buffer-size" show value function.

   Prints the configured BTS buffer size VALUE to FILE.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3024
/* The "record pt buffer-size" show value function.

   Prints the configured PT buffer size VALUE to FILE.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3035
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registers the "record btrace" command tree (start commands, set/show
   options for the BTS and PT formats), installs the record-btrace target,
   creates the frame cache, and sets the default buffer sizes.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and its "record b" alias.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* Format-specific start commands.  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option trees.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* BTS options.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size. \
The actual buffer size may differ from the requested size. \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* PT options.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size. Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Install the target.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}