]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
btrace: allow recording to be started (and stopped) for running threads
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
61baf725 3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
afedecd3
MM
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
afedecd3
MM
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
afedecd3
MM
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
47/* A new thread observer enabling branch tracing for the new thread. */
48static struct observer *record_btrace_thread_observer;
49
67b5c0c1
MM
50/* Memory access types used in set/show record btrace replay-memory-access. */
51static const char replay_memory_access_read_only[] = "read-only";
52static const char replay_memory_access_read_write[] = "read-write";
53static const char *const replay_memory_access_types[] =
54{
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58};
59
60/* The currently allowed replay memory access type. */
61static const char *replay_memory_access = replay_memory_access_read_only;
62
63/* Command lists for "set/show record btrace". */
64static struct cmd_list_element *set_record_btrace_cmdlist;
65static struct cmd_list_element *show_record_btrace_cmdlist;
633785ff 66
70ad5bff
MM
67/* The execution direction of the last resume we got. See record-full.c. */
68static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70/* The async event handler for reverse/replay execution. */
71static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
aef92902
MM
73/* A flag indicating that we are currently generating a core file. */
74static int record_btrace_generating_corefile;
75
f4abbc16
MM
76/* The current branch trace configuration. */
77static struct btrace_config record_btrace_conf;
78
79/* Command list for "record btrace". */
80static struct cmd_list_element *record_btrace_cmdlist;
81
d33501a5
MM
82/* Command lists for "set/show record btrace bts". */
83static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
b20a6524
MM
86/* Command lists for "set/show record btrace pt". */
87static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
afedecd3
MM
90/* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93#define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
afedecd3
MM
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
066ce621
MM
static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  /* Branch tracing is per-thread; there must be a selected thread.  */
  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Fetching trace implies register access; reject contexts where that is
     not allowed (e.g. while the thread is running).  */
  validate_registers_access ();

  /* Pull in any trace collected since the last fetch.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
129
130/* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136static struct btrace_thread_info *
137require_btrace (void)
138{
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
afedecd3
MM
144}
145
146/* Enable branch tracing for one thread. Warn on errors. */
147
static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Downgrade any enabling failure to a warning; a new thread that cannot
     be traced should not abort the caller (e.g. the new-thread observer).  */
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
161
162/* Callback function to disable branch tracing for one thread. */
163
164static void
165record_btrace_disable_callback (void *arg)
166{
19ba03f4 167 struct thread_info *tp = (struct thread_info *) arg;
afedecd3
MM
168
169 btrace_disable (tp);
170}
171
172/* Enable automatic tracing of new threads. */
173
static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* From now on, every newly created thread gets branch tracing enabled
     automatically (errors are reported as warnings, see
     record_btrace_enable_warn).  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
182
183/* Disable automatic tracing of new threads. */
184
185static void
186record_btrace_auto_disable (void)
187{
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196}
197
70ad5bff
MM
198/* The record-btrace async event handler function. */
199
static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  /* Forward the async notification into the common inferior event loop.  */
  inferior_event_handler (INF_REG_EVENT, NULL);
}
205
c0272db5
TW
206/* See record-btrace.h. */
207
void
record_btrace_push_target (void)
{
  const char *format;

  /* Enable tracing for threads created from here on before the target is
     visible on the target stack.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  /* Announce the new recording method to interested observers (MI).  */
  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}
225
afedecd3
MM
226/* The to_open method of target record-btrace. */
227
static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing per thread; register a cleanup per enabled thread so a
     failure part-way through disables everything we enabled so far.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  /* Success: keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
256
257/* The to_stop_recording method of target record-btrace. */
258
static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  /* Stop auto-enabling tracing for new threads first, then disable tracing
     for every thread that is currently being traced.  */
  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
272
c0272db5
TW
273/* The to_disconnect method of target record-btrace. */
274
static void
record_btrace_disconnect (struct target_ops *self, const char *args,
			  int from_tty)
{
  /* Save the target beneath; SELF is no longer valid after unpush.  */
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}
287
afedecd3
MM
288/* The to_close method of target record-btrace. */
289
static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
307
b7d2e916
PA
308/* The to_async method of target record-btrace. */
309
static void
record_btrace_async (struct target_ops *ops, int enable)
{
  /* Mirror the async state onto our event handler, then forward the
     request to the target beneath.  */
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}
320
d33501a5
MM
321/* Adjusts the size and returns a human readable size suffix. */
322
/* Scale *SIZE down to the largest binary unit that divides it evenly and
   return the matching suffix ("GB", "MB", "kB", or "" if none applies).  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int val = *size;

  /* Try the largest unit first and fall through to smaller ones; a value
     that is not a whole multiple of 1 kB is returned unchanged.  */
  if ((val & ((1u << 30) - 1)) == 0)
    {
      *size = val >> 30;
      return "GB";
    }

  if ((val & ((1u << 20) - 1)) == 0)
    {
      *size = val >> 20;
      return "MB";
    }

  if ((val & ((1u << 10) - 1)) == 0)
    {
      *size = val >> 10;
      return "kB";
    }

  return "";
}
348
349/* Print a BTS configuration. */
350
static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  /* A zero size means the buffer size is unknown/unset; print nothing.  */
  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}
364
bc504a31 365/* Print an Intel Processor Trace configuration. */
b20a6524
MM
366
static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  /* A zero size means the buffer size is unknown/unset; print nothing.  */
  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}
380
d33501a5
MM
381/* Print a branch tracing configuration. */
382
383static void
384record_btrace_print_conf (const struct btrace_config *conf)
385{
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
b20a6524
MM
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
d33501a5
MM
401 }
402
403 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
404}
405
afedecd3
MM
406/* The to_info_record method of target record-btrace. */
407
static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Update the trace before counting; a thread with no trace simply
     reports zero instructions/calls/gaps.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call is the total number of calls.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
480
31fd9caa
MM
481/* Print a decode error. */
482
static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  /* Default when the format/errcode combination is not recognized.  */
  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	/* These three are expected conditions, not decode errors; they are
	   printed without the "decode error" prefix.  */
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes are libipt errors; translate via libipt.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  /* Emit "[decode error (N): REASON]" or just "[REASON]".  */
  uiout->text (_("["));
  if (is_error)
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}
552
afedecd3
MM
553/* Print an unsigned int. */
554
static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  /* ui_out has no native unsigned field; format it ourselves.  */
  uiout->field_fmt (fld, "%u", val);
}
560
f94cc897
MM
561/* A range of source lines. */
562
/* A range of source lines within a single symtab.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
574
575/* Construct a line range. */
576
577static struct btrace_line_range
578btrace_mk_line_range (struct symtab *symtab, int begin, int end)
579{
580 struct btrace_line_range range;
581
582 range.symtab = symtab;
583 range.begin = begin;
584 range.end = end;
585
586 return range;
587}
588
589/* Add a line to a line range. */
590
/* Add a line to a line range, widening the range as needed.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;
  /* NOTE(review): END is documented as exclusive, yet extending upward sets
     END = LINE rather than LINE + 1, so the new LINE ends up just outside
     the range.  Preserved as-is — callers may rely on this; verify against
     btrace_line_range_contains_range usage before changing.  */

  return range;
}
607
608/* Return non-zero if RANGE is empty, zero otherwise. */
609
610static int
611btrace_line_range_is_empty (struct btrace_line_range range)
612{
613 return range.end <= range.begin;
614}
615
616/* Return non-zero if LHS contains RHS, zero otherwise. */
617
618static int
619btrace_line_range_contains_range (struct btrace_line_range lhs,
620 struct btrace_line_range rhs)
621{
622 return ((lhs.symtab == rhs.symtab)
623 && (lhs.begin <= rhs.begin)
624 && (rhs.end <= lhs.end));
625}
626
627/* Find the line range associated with PC. */
628
/* Find the range of source lines whose line-table entries map exactly
   to PC.  Returns an empty range if PC has no symtab or line table.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the loop bound deliberately stops before the final entry
     (i < nlines - 1); the last entry is presumably an end-of-sequence
     marker — confirm against the linetable layout before changing.
     Entries with line == 0 are markers and are skipped explicitly.  */
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
660
661/* Print source lines in LINES to UIOUT.
662
663 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
664 instructions corresponding to that source line. When printing a new source
665 line, we do the cleanups for the open chain and open a new cleanup chain for
666 the new source line. If the source line range in LINES is not empty, this
667 function will leave the cleanup chain for the last printed source line open
668 so instructions can be added to it. */
669
static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      /* Close the tuple/list opened for the previous source line before
	 starting a new one.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      /* Open the instruction list; the caller will add instructions and
	 eventually run *UI_ITEM_CHAIN to close it.  */
      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
694
afedecd3
MM
695/* Disassemble a section of the recorded instruction trace. */
696
static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  /* Always mark speculatively executed instructions in the output.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  gdb_disassembler di (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      /* Print the source lines for this instruction unless they
		 were already covered by the previously printed range.  */
	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  di.pretty_print_insn (uiout, &dinsn, flags);
	}
    }

  do_cleanups (cleanups);
}
785
786/* The to_insn_history method of target record-btrace. */
787
static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* SIZE < 0 means print backward; CONTEXT is the number of lines.  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from where the previous "record instruction-history"
	 command stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed window for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
871
872/* The to_insn_history_range method of target record-btrace. */
873
static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* Instruction numbers are unsigned int internally; narrowing FROM/TO
     must not lose bits.  */
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  /* Remember the printed window for follow-up commands.  */
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
924
925/* The to_insn_history_from method of target record-btrace. */
926
static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      /* Print CONTEXT instructions ending at (and including) FROM,
	 clamping at the start of the trace.  */
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      /* Print CONTEXT instructions starting at FROM.  */
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
958
959/* Print the instruction number range for a function call history line. */
960
static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  /* A function segment in the trace always has at least one instruction.  */
  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  /* Print the inclusive instruction-number range "BEGIN,END".  */
  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}
977
ce0dfbea
MM
978/* Compute the lowest and highest source line for the instructions in BFUN
979 and return them in PBEGIN and PEND.
980 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
981 result from inlining or macro expansion. */
982
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  /* Start with an inverted (empty) range; callers detect "no line found"
     via end < begin.  */
  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip instructions that map to a different symtab (e.g. inlined
	 code) or that have no line information.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
1018
afedecd3
MM
1019/* Print the source line information for a function call history line. */
1020
static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  /* END < BEGIN means no line information was found.  */
  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  /* Print ":BEGIN" for a single line or ":BEGIN,END" for a range.  */
  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}
1048
0b722aec
MM
1049/* Get the name of a branch trace function. */
1050
1051static const char *
1052btrace_get_bfun_name (const struct btrace_function *bfun)
1053{
1054 struct minimal_symbol *msym;
1055 struct symbol *sym;
1056
1057 if (bfun == NULL)
1058 return "??";
1059
1060 msym = bfun->msym;
1061 sym = bfun->sym;
1062
1063 if (sym != NULL)
1064 return SYMBOL_PRINT_NAME (sym);
1065 else if (msym != NULL)
efd66ac6 1066 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1067 else
1068 return "??";
1069}
1070
afedecd3
MM
1071/* Disassemble a section of the recorded function trace. */
1072
static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth, adjusted by the trace-wide base level.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}
1145
1146/* The to_call_history method of target record-btrace. */
1147
1148static void
8d297bbf 1149record_btrace_call_history (struct target_ops *self, int size, int int_flags)
afedecd3
MM
1150{
1151 struct btrace_thread_info *btinfo;
23a7fe75
MM
1152 struct btrace_call_history *history;
1153 struct btrace_call_iterator begin, end;
afedecd3
MM
1154 struct cleanup *uiout_cleanup;
1155 struct ui_out *uiout;
23a7fe75 1156 unsigned int context, covered;
8d297bbf 1157 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1158
1159 uiout = current_uiout;
1160 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1161 "insn history");
afedecd3 1162 context = abs (size);
afedecd3
MM
1163 if (context == 0)
1164 error (_("Bad record function-call-history-size."));
1165
23a7fe75
MM
1166 btinfo = require_btrace ();
1167 history = btinfo->call_history;
1168 if (history == NULL)
afedecd3 1169 {
07bbe694 1170 struct btrace_insn_iterator *replay;
afedecd3 1171
8d297bbf 1172 DEBUG ("call-history (0x%x): %d", int_flags, size);
afedecd3 1173
07bbe694
MM
1174 /* If we're replaying, we start at the replay position. Otherwise, we
1175 start at the tail of the trace. */
1176 replay = btinfo->replay;
1177 if (replay != NULL)
1178 {
1179 begin.function = replay->function;
1180 begin.btinfo = btinfo;
1181 }
1182 else
1183 btrace_call_end (&begin, btinfo);
1184
1185 /* We start from here and expand in the requested direction. Then we
1186 expand in the other direction, as well, to fill up any remaining
1187 context. */
1188 end = begin;
1189 if (size < 0)
1190 {
1191 /* We want the current position covered, as well. */
1192 covered = btrace_call_next (&end, 1);
1193 covered += btrace_call_prev (&begin, context - covered);
1194 covered += btrace_call_next (&end, context - covered);
1195 }
1196 else
1197 {
1198 covered = btrace_call_next (&end, context);
1199 covered += btrace_call_prev (&begin, context- covered);
1200 }
afedecd3
MM
1201 }
1202 else
1203 {
23a7fe75
MM
1204 begin = history->begin;
1205 end = history->end;
afedecd3 1206
8d297bbf 1207 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
23a7fe75 1208 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1209
23a7fe75
MM
1210 if (size < 0)
1211 {
1212 end = begin;
1213 covered = btrace_call_prev (&begin, context);
1214 }
1215 else
1216 {
1217 begin = end;
1218 covered = btrace_call_next (&end, context);
1219 }
afedecd3
MM
1220 }
1221
23a7fe75 1222 if (covered > 0)
8710b709 1223 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
1224 else
1225 {
1226 if (size < 0)
1227 printf_unfiltered (_("At the start of the branch trace record.\n"));
1228 else
1229 printf_unfiltered (_("At the end of the branch trace record.\n"));
1230 }
afedecd3 1231
23a7fe75 1232 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1233 do_cleanups (uiout_cleanup);
1234}
1235
1236/* The to_call_history_range method of target record-btrace. */
1237
1238static void
f0d960ea 1239record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1240 ULONGEST from, ULONGEST to,
1241 int int_flags)
afedecd3
MM
1242{
1243 struct btrace_thread_info *btinfo;
23a7fe75
MM
1244 struct btrace_call_history *history;
1245 struct btrace_call_iterator begin, end;
afedecd3
MM
1246 struct cleanup *uiout_cleanup;
1247 struct ui_out *uiout;
23a7fe75
MM
1248 unsigned int low, high;
1249 int found;
8d297bbf 1250 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1251
1252 uiout = current_uiout;
1253 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1254 "func history");
23a7fe75
MM
1255 low = from;
1256 high = to;
afedecd3 1257
8d297bbf 1258 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
afedecd3
MM
1259
1260 /* Check for wrap-arounds. */
23a7fe75 1261 if (low != from || high != to)
afedecd3
MM
1262 error (_("Bad range."));
1263
0688d04e 1264 if (high < low)
afedecd3
MM
1265 error (_("Bad range."));
1266
23a7fe75 1267 btinfo = require_btrace ();
afedecd3 1268
23a7fe75
MM
1269 found = btrace_find_call_by_number (&begin, btinfo, low);
1270 if (found == 0)
1271 error (_("Range out of bounds."));
afedecd3 1272
23a7fe75
MM
1273 found = btrace_find_call_by_number (&end, btinfo, high);
1274 if (found == 0)
0688d04e
MM
1275 {
1276 /* Silently truncate the range. */
1277 btrace_call_end (&end, btinfo);
1278 }
1279 else
1280 {
1281 /* We want both begin and end to be inclusive. */
1282 btrace_call_next (&end, 1);
1283 }
afedecd3 1284
8710b709 1285 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1286 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1287
1288 do_cleanups (uiout_cleanup);
1289}
1290
1291/* The to_call_history_from method of target record-btrace. */
1292
1293static void
ec0aea04 1294record_btrace_call_history_from (struct target_ops *self,
8d297bbf
PA
1295 ULONGEST from, int size,
1296 int int_flags)
afedecd3
MM
1297{
1298 ULONGEST begin, end, context;
8d297bbf 1299 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1300
1301 context = abs (size);
0688d04e
MM
1302 if (context == 0)
1303 error (_("Bad record function-call-history-size."));
afedecd3
MM
1304
1305 if (size < 0)
1306 {
1307 end = from;
1308
1309 if (from < context)
1310 begin = 0;
1311 else
0688d04e 1312 begin = from - context + 1;
afedecd3
MM
1313 }
1314 else
1315 {
1316 begin = from;
0688d04e 1317 end = from + context - 1;
afedecd3
MM
1318
1319 /* Check for wrap-around. */
1320 if (end < begin)
1321 end = ULONGEST_MAX;
1322 }
1323
f0d960ea 1324 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
1325}
1326
07bbe694
MM
1327/* The to_record_is_replaying method of target record-btrace. */
1328
1329static int
a52eab48 1330record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1331{
1332 struct thread_info *tp;
1333
034f788c 1334 ALL_NON_EXITED_THREADS (tp)
a52eab48 1335 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1336 return 1;
1337
1338 return 0;
1339}
1340
7ff27e9b
MM
1341/* The to_record_will_replay method of target record-btrace. */
1342
1343static int
1344record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1345{
1346 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1347}
1348
633785ff
MM
1349/* The to_xfer_partial method of target record-btrace. */
1350
9b409511 1351static enum target_xfer_status
633785ff
MM
1352record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1353 const char *annex, gdb_byte *readbuf,
1354 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1355 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1356{
1357 struct target_ops *t;
1358
1359 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1360 if (replay_memory_access == replay_memory_access_read_only
aef92902 1361 && !record_btrace_generating_corefile
4d10e986 1362 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1363 {
1364 switch (object)
1365 {
1366 case TARGET_OBJECT_MEMORY:
1367 {
1368 struct target_section *section;
1369
1370 /* We do not allow writing memory in general. */
1371 if (writebuf != NULL)
9b409511
YQ
1372 {
1373 *xfered_len = len;
bc113b4e 1374 return TARGET_XFER_UNAVAILABLE;
9b409511 1375 }
633785ff
MM
1376
1377 /* We allow reading readonly memory. */
1378 section = target_section_by_addr (ops, offset);
1379 if (section != NULL)
1380 {
1381 /* Check if the section we found is readonly. */
1382 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1383 section->the_bfd_section)
1384 & SEC_READONLY) != 0)
1385 {
1386 /* Truncate the request to fit into this section. */
325fac50 1387 len = std::min (len, section->endaddr - offset);
633785ff
MM
1388 break;
1389 }
1390 }
1391
9b409511 1392 *xfered_len = len;
bc113b4e 1393 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1394 }
1395 }
1396 }
1397
1398 /* Forward the request. */
e75fdfca
TT
1399 ops = ops->beneath;
1400 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1401 offset, len, xfered_len);
633785ff
MM
1402}
1403
1404/* The to_insert_breakpoint method of target record-btrace. */
1405
1406static int
1407record_btrace_insert_breakpoint (struct target_ops *ops,
1408 struct gdbarch *gdbarch,
1409 struct bp_target_info *bp_tgt)
1410{
67b5c0c1
MM
1411 const char *old;
1412 int ret;
633785ff
MM
1413
1414 /* Inserting breakpoints requires accessing memory. Allow it for the
1415 duration of this function. */
67b5c0c1
MM
1416 old = replay_memory_access;
1417 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1418
1419 ret = 0;
492d29ea
PA
1420 TRY
1421 {
1422 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1423 }
492d29ea
PA
1424 CATCH (except, RETURN_MASK_ALL)
1425 {
6c63c96a 1426 replay_memory_access = old;
492d29ea
PA
1427 throw_exception (except);
1428 }
1429 END_CATCH
6c63c96a 1430 replay_memory_access = old;
633785ff
MM
1431
1432 return ret;
1433}
1434
1435/* The to_remove_breakpoint method of target record-btrace. */
1436
1437static int
1438record_btrace_remove_breakpoint (struct target_ops *ops,
1439 struct gdbarch *gdbarch,
73971819
PA
1440 struct bp_target_info *bp_tgt,
1441 enum remove_bp_reason reason)
633785ff 1442{
67b5c0c1
MM
1443 const char *old;
1444 int ret;
633785ff
MM
1445
1446 /* Removing breakpoints requires accessing memory. Allow it for the
1447 duration of this function. */
67b5c0c1
MM
1448 old = replay_memory_access;
1449 replay_memory_access = replay_memory_access_read_write;
633785ff
MM
1450
1451 ret = 0;
492d29ea
PA
1452 TRY
1453 {
73971819
PA
1454 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1455 reason);
492d29ea 1456 }
492d29ea
PA
1457 CATCH (except, RETURN_MASK_ALL)
1458 {
6c63c96a 1459 replay_memory_access = old;
492d29ea
PA
1460 throw_exception (except);
1461 }
1462 END_CATCH
6c63c96a 1463 replay_memory_access = old;
633785ff
MM
1464
1465 return ret;
1466}
1467
1f3ef581
MM
1468/* The to_fetch_registers method of target record-btrace. */
1469
1470static void
1471record_btrace_fetch_registers (struct target_ops *ops,
1472 struct regcache *regcache, int regno)
1473{
1474 struct btrace_insn_iterator *replay;
1475 struct thread_info *tp;
1476
1477 tp = find_thread_ptid (inferior_ptid);
1478 gdb_assert (tp != NULL);
1479
1480 replay = tp->btrace.replay;
aef92902 1481 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1482 {
1483 const struct btrace_insn *insn;
1484 struct gdbarch *gdbarch;
1485 int pcreg;
1486
1487 gdbarch = get_regcache_arch (regcache);
1488 pcreg = gdbarch_pc_regnum (gdbarch);
1489 if (pcreg < 0)
1490 return;
1491
1492 /* We can only provide the PC register. */
1493 if (regno >= 0 && regno != pcreg)
1494 return;
1495
1496 insn = btrace_insn_get (replay);
1497 gdb_assert (insn != NULL);
1498
1499 regcache_raw_supply (regcache, regno, &insn->pc);
1500 }
1501 else
1502 {
e75fdfca 1503 struct target_ops *t = ops->beneath;
1f3ef581 1504
e75fdfca 1505 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1506 }
1507}
1508
1509/* The to_store_registers method of target record-btrace. */
1510
1511static void
1512record_btrace_store_registers (struct target_ops *ops,
1513 struct regcache *regcache, int regno)
1514{
1515 struct target_ops *t;
1516
a52eab48 1517 if (!record_btrace_generating_corefile
4d10e986
MM
1518 && record_btrace_is_replaying (ops, inferior_ptid))
1519 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1520
1521 gdb_assert (may_write_registers != 0);
1522
e75fdfca
TT
1523 t = ops->beneath;
1524 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1525}
1526
1527/* The to_prepare_to_store method of target record-btrace. */
1528
1529static void
1530record_btrace_prepare_to_store (struct target_ops *ops,
1531 struct regcache *regcache)
1532{
1533 struct target_ops *t;
1534
a52eab48 1535 if (!record_btrace_generating_corefile
4d10e986 1536 && record_btrace_is_replaying (ops, inferior_ptid))
1f3ef581
MM
1537 return;
1538
e75fdfca
TT
1539 t = ops->beneath;
1540 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1541}
1542
0b722aec
MM
1543/* The branch trace frame cache. */
1544
1545struct btrace_frame_cache
1546{
1547 /* The thread. */
1548 struct thread_info *tp;
1549
1550 /* The frame info. */
1551 struct frame_info *frame;
1552
1553 /* The branch trace function segment. */
1554 const struct btrace_function *bfun;
1555};
1556
1557/* A struct btrace_frame_cache hash table indexed by NEXT. */
1558
1559static htab_t bfcache;
1560
1561/* hash_f for htab_create_alloc of bfcache. */
1562
1563static hashval_t
1564bfcache_hash (const void *arg)
1565{
19ba03f4
SM
1566 const struct btrace_frame_cache *cache
1567 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1568
1569 return htab_hash_pointer (cache->frame);
1570}
1571
1572/* eq_f for htab_create_alloc of bfcache. */
1573
1574static int
1575bfcache_eq (const void *arg1, const void *arg2)
1576{
19ba03f4
SM
1577 const struct btrace_frame_cache *cache1
1578 = (const struct btrace_frame_cache *) arg1;
1579 const struct btrace_frame_cache *cache2
1580 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1581
1582 return cache1->frame == cache2->frame;
1583}
1584
1585/* Create a new btrace frame cache. */
1586
1587static struct btrace_frame_cache *
1588bfcache_new (struct frame_info *frame)
1589{
1590 struct btrace_frame_cache *cache;
1591 void **slot;
1592
1593 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1594 cache->frame = frame;
1595
1596 slot = htab_find_slot (bfcache, cache, INSERT);
1597 gdb_assert (*slot == NULL);
1598 *slot = cache;
1599
1600 return cache;
1601}
1602
1603/* Extract the branch trace function from a branch trace frame. */
1604
1605static const struct btrace_function *
1606btrace_get_frame_function (struct frame_info *frame)
1607{
1608 const struct btrace_frame_cache *cache;
1609 const struct btrace_function *bfun;
1610 struct btrace_frame_cache pattern;
1611 void **slot;
1612
1613 pattern.frame = frame;
1614
1615 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1616 if (slot == NULL)
1617 return NULL;
1618
19ba03f4 1619 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1620 return cache->bfun;
1621}
1622
cecac1ab
MM
1623/* Implement stop_reason method for record_btrace_frame_unwind. */
1624
1625static enum unwind_stop_reason
1626record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1627 void **this_cache)
1628{
0b722aec
MM
1629 const struct btrace_frame_cache *cache;
1630 const struct btrace_function *bfun;
1631
19ba03f4 1632 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1633 bfun = cache->bfun;
1634 gdb_assert (bfun != NULL);
1635
1636 if (bfun->up == NULL)
1637 return UNWIND_UNAVAILABLE;
1638
1639 return UNWIND_NO_REASON;
cecac1ab
MM
1640}
1641
1642/* Implement this_id method for record_btrace_frame_unwind. */
1643
1644static void
1645record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1646 struct frame_id *this_id)
1647{
0b722aec
MM
1648 const struct btrace_frame_cache *cache;
1649 const struct btrace_function *bfun;
1650 CORE_ADDR code, special;
1651
19ba03f4 1652 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1653
1654 bfun = cache->bfun;
1655 gdb_assert (bfun != NULL);
1656
1657 while (bfun->segment.prev != NULL)
1658 bfun = bfun->segment.prev;
1659
1660 code = get_frame_func (this_frame);
1661 special = bfun->number;
1662
1663 *this_id = frame_id_build_unavailable_stack_special (code, special);
1664
1665 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1666 btrace_get_bfun_name (cache->bfun),
1667 core_addr_to_string_nz (this_id->code_addr),
1668 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1669}
1670
1671/* Implement prev_register method for record_btrace_frame_unwind. */
1672
1673static struct value *
1674record_btrace_frame_prev_register (struct frame_info *this_frame,
1675 void **this_cache,
1676 int regnum)
1677{
0b722aec
MM
1678 const struct btrace_frame_cache *cache;
1679 const struct btrace_function *bfun, *caller;
1680 const struct btrace_insn *insn;
1681 struct gdbarch *gdbarch;
1682 CORE_ADDR pc;
1683 int pcreg;
1684
1685 gdbarch = get_frame_arch (this_frame);
1686 pcreg = gdbarch_pc_regnum (gdbarch);
1687 if (pcreg < 0 || regnum != pcreg)
1688 throw_error (NOT_AVAILABLE_ERROR,
1689 _("Registers are not available in btrace record history"));
1690
19ba03f4 1691 cache = (const struct btrace_frame_cache *) *this_cache;
0b722aec
MM
1692 bfun = cache->bfun;
1693 gdb_assert (bfun != NULL);
1694
1695 caller = bfun->up;
1696 if (caller == NULL)
1697 throw_error (NOT_AVAILABLE_ERROR,
1698 _("No caller in btrace record history"));
1699
1700 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1701 {
1702 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1703 pc = insn->pc;
1704 }
1705 else
1706 {
1707 insn = VEC_last (btrace_insn_s, caller->insn);
1708 pc = insn->pc;
1709
1710 pc += gdb_insn_length (gdbarch, pc);
1711 }
1712
1713 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1714 btrace_get_bfun_name (bfun), bfun->level,
1715 core_addr_to_string_nz (pc));
1716
1717 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1718}
1719
1720/* Implement sniffer method for record_btrace_frame_unwind. */
1721
1722static int
1723record_btrace_frame_sniffer (const struct frame_unwind *self,
1724 struct frame_info *this_frame,
1725 void **this_cache)
1726{
0b722aec
MM
1727 const struct btrace_function *bfun;
1728 struct btrace_frame_cache *cache;
cecac1ab 1729 struct thread_info *tp;
0b722aec 1730 struct frame_info *next;
cecac1ab
MM
1731
1732 /* THIS_FRAME does not contain a reference to its thread. */
1733 tp = find_thread_ptid (inferior_ptid);
1734 gdb_assert (tp != NULL);
1735
0b722aec
MM
1736 bfun = NULL;
1737 next = get_next_frame (this_frame);
1738 if (next == NULL)
1739 {
1740 const struct btrace_insn_iterator *replay;
1741
1742 replay = tp->btrace.replay;
1743 if (replay != NULL)
1744 bfun = replay->function;
1745 }
1746 else
1747 {
1748 const struct btrace_function *callee;
1749
1750 callee = btrace_get_frame_function (next);
1751 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1752 bfun = callee->up;
1753 }
1754
1755 if (bfun == NULL)
1756 return 0;
1757
1758 DEBUG ("[frame] sniffed frame for %s on level %d",
1759 btrace_get_bfun_name (bfun), bfun->level);
1760
1761 /* This is our frame. Initialize the frame cache. */
1762 cache = bfcache_new (this_frame);
1763 cache->tp = tp;
1764 cache->bfun = bfun;
1765
1766 *this_cache = cache;
1767 return 1;
1768}
1769
1770/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1771
1772static int
1773record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1774 struct frame_info *this_frame,
1775 void **this_cache)
1776{
1777 const struct btrace_function *bfun, *callee;
1778 struct btrace_frame_cache *cache;
1779 struct frame_info *next;
1780
1781 next = get_next_frame (this_frame);
1782 if (next == NULL)
1783 return 0;
1784
1785 callee = btrace_get_frame_function (next);
1786 if (callee == NULL)
1787 return 0;
1788
1789 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1790 return 0;
1791
1792 bfun = callee->up;
1793 if (bfun == NULL)
1794 return 0;
1795
1796 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1797 btrace_get_bfun_name (bfun), bfun->level);
1798
1799 /* This is our frame. Initialize the frame cache. */
1800 cache = bfcache_new (this_frame);
1801 cache->tp = find_thread_ptid (inferior_ptid);
1802 cache->bfun = bfun;
1803
1804 *this_cache = cache;
1805 return 1;
1806}
1807
1808static void
1809record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1810{
1811 struct btrace_frame_cache *cache;
1812 void **slot;
1813
19ba03f4 1814 cache = (struct btrace_frame_cache *) this_cache;
0b722aec
MM
1815
1816 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1817 gdb_assert (slot != NULL);
1818
1819 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1820}
1821
1822/* btrace recording does not store previous memory content, neither the stack
1823 frames content. Any unwinding would return errorneous results as the stack
1824 contents no longer matches the changed PC value restored from history.
1825 Therefore this unwinder reports any possibly unwound registers as
1826 <unavailable>. */
1827
0b722aec 1828const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1829{
1830 NORMAL_FRAME,
1831 record_btrace_frame_unwind_stop_reason,
1832 record_btrace_frame_this_id,
1833 record_btrace_frame_prev_register,
1834 NULL,
0b722aec
MM
1835 record_btrace_frame_sniffer,
1836 record_btrace_frame_dealloc_cache
1837};
1838
1839const struct frame_unwind record_btrace_tailcall_frame_unwind =
1840{
1841 TAILCALL_FRAME,
1842 record_btrace_frame_unwind_stop_reason,
1843 record_btrace_frame_this_id,
1844 record_btrace_frame_prev_register,
1845 NULL,
1846 record_btrace_tailcall_frame_sniffer,
1847 record_btrace_frame_dealloc_cache
cecac1ab 1848};
b2f4cfde 1849
ac01945b
TT
1850/* Implement the to_get_unwinder method. */
1851
1852static const struct frame_unwind *
1853record_btrace_to_get_unwinder (struct target_ops *self)
1854{
1855 return &record_btrace_frame_unwind;
1856}
1857
1858/* Implement the to_get_tailcall_unwinder method. */
1859
1860static const struct frame_unwind *
1861record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1862{
1863 return &record_btrace_tailcall_frame_unwind;
1864}
1865
987e68b1
MM
1866/* Return a human-readable string for FLAG. */
1867
1868static const char *
1869btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1870{
1871 switch (flag)
1872 {
1873 case BTHR_STEP:
1874 return "step";
1875
1876 case BTHR_RSTEP:
1877 return "reverse-step";
1878
1879 case BTHR_CONT:
1880 return "cont";
1881
1882 case BTHR_RCONT:
1883 return "reverse-cont";
1884
1885 case BTHR_STOP:
1886 return "stop";
1887 }
1888
1889 return "<invalid>";
1890}
1891
52834460
MM
1892/* Indicate that TP should be resumed according to FLAG. */
1893
1894static void
1895record_btrace_resume_thread (struct thread_info *tp,
1896 enum btrace_thread_flag flag)
1897{
1898 struct btrace_thread_info *btinfo;
1899
43792cf0 1900 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1901 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
52834460
MM
1902
1903 btinfo = &tp->btrace;
1904
52834460
MM
1905 /* Fetch the latest branch trace. */
1906 btrace_fetch (tp);
1907
0ca912df
MM
1908 /* A resume request overwrites a preceding resume or stop request. */
1909 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460
MM
1910 btinfo->flags |= flag;
1911}
1912
ec71cc2f
MM
1913/* Get the current frame for TP. */
1914
1915static struct frame_info *
1916get_thread_current_frame (struct thread_info *tp)
1917{
1918 struct frame_info *frame;
1919 ptid_t old_inferior_ptid;
1920 int executing;
1921
1922 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1923 old_inferior_ptid = inferior_ptid;
1924 inferior_ptid = tp->ptid;
1925
1926 /* Clear the executing flag to allow changes to the current frame.
1927 We are not actually running, yet. We just started a reverse execution
1928 command or a record goto command.
1929 For the latter, EXECUTING is false and this has no effect.
1930 For the former, EXECUTING is true and we're in to_wait, about to
1931 move the thread. Since we need to recompute the stack, we temporarily
1932 set EXECUTING to flase. */
1933 executing = is_executing (inferior_ptid);
1934 set_executing (inferior_ptid, 0);
1935
1936 frame = NULL;
1937 TRY
1938 {
1939 frame = get_current_frame ();
1940 }
1941 CATCH (except, RETURN_MASK_ALL)
1942 {
1943 /* Restore the previous execution state. */
1944 set_executing (inferior_ptid, executing);
1945
1946 /* Restore the previous inferior_ptid. */
1947 inferior_ptid = old_inferior_ptid;
1948
1949 throw_exception (except);
1950 }
1951 END_CATCH
1952
1953 /* Restore the previous execution state. */
1954 set_executing (inferior_ptid, executing);
1955
1956 /* Restore the previous inferior_ptid. */
1957 inferior_ptid = old_inferior_ptid;
1958
1959 return frame;
1960}
1961
52834460
MM
1962/* Start replaying a thread. */
1963
1964static struct btrace_insn_iterator *
1965record_btrace_start_replaying (struct thread_info *tp)
1966{
52834460
MM
1967 struct btrace_insn_iterator *replay;
1968 struct btrace_thread_info *btinfo;
52834460
MM
1969
1970 btinfo = &tp->btrace;
1971 replay = NULL;
1972
1973 /* We can't start replaying without trace. */
1974 if (btinfo->begin == NULL)
1975 return NULL;
1976
52834460
MM
1977 /* GDB stores the current frame_id when stepping in order to detects steps
1978 into subroutines.
1979 Since frames are computed differently when we're replaying, we need to
1980 recompute those stored frames and fix them up so we can still detect
1981 subroutines after we started replaying. */
492d29ea 1982 TRY
52834460
MM
1983 {
1984 struct frame_info *frame;
1985 struct frame_id frame_id;
1986 int upd_step_frame_id, upd_step_stack_frame_id;
1987
1988 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1989 frame = get_thread_current_frame (tp);
52834460
MM
1990 frame_id = get_frame_id (frame);
1991
1992 /* Check if we need to update any stepping-related frame id's. */
1993 upd_step_frame_id = frame_id_eq (frame_id,
1994 tp->control.step_frame_id);
1995 upd_step_stack_frame_id = frame_id_eq (frame_id,
1996 tp->control.step_stack_frame_id);
1997
1998 /* We start replaying at the end of the branch trace. This corresponds
1999 to the current instruction. */
8d749320 2000 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
2001 btrace_insn_end (replay, btinfo);
2002
31fd9caa
MM
2003 /* Skip gaps at the end of the trace. */
2004 while (btrace_insn_get (replay) == NULL)
2005 {
2006 unsigned int steps;
2007
2008 steps = btrace_insn_prev (replay, 1);
2009 if (steps == 0)
2010 error (_("No trace."));
2011 }
2012
52834460
MM
2013 /* We're not replaying, yet. */
2014 gdb_assert (btinfo->replay == NULL);
2015 btinfo->replay = replay;
2016
2017 /* Make sure we're not using any stale registers. */
2018 registers_changed_ptid (tp->ptid);
2019
2020 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 2021 frame = get_thread_current_frame (tp);
52834460
MM
2022 frame_id = get_frame_id (frame);
2023
2024 /* Replace stepping related frames where necessary. */
2025 if (upd_step_frame_id)
2026 tp->control.step_frame_id = frame_id;
2027 if (upd_step_stack_frame_id)
2028 tp->control.step_stack_frame_id = frame_id;
2029 }
492d29ea 2030 CATCH (except, RETURN_MASK_ALL)
52834460
MM
2031 {
2032 xfree (btinfo->replay);
2033 btinfo->replay = NULL;
2034
2035 registers_changed_ptid (tp->ptid);
2036
2037 throw_exception (except);
2038 }
492d29ea 2039 END_CATCH
52834460
MM
2040
2041 return replay;
2042}
2043
2044/* Stop replaying a thread. */
2045
2046static void
2047record_btrace_stop_replaying (struct thread_info *tp)
2048{
2049 struct btrace_thread_info *btinfo;
2050
2051 btinfo = &tp->btrace;
2052
2053 xfree (btinfo->replay);
2054 btinfo->replay = NULL;
2055
2056 /* Make sure we're not leaving any stale registers. */
2057 registers_changed_ptid (tp->ptid);
2058}
2059
e3cfc1c7
MM
2060/* Stop replaying TP if it is at the end of its execution history. */
2061
2062static void
2063record_btrace_stop_replaying_at_end (struct thread_info *tp)
2064{
2065 struct btrace_insn_iterator *replay, end;
2066 struct btrace_thread_info *btinfo;
2067
2068 btinfo = &tp->btrace;
2069 replay = btinfo->replay;
2070
2071 if (replay == NULL)
2072 return;
2073
2074 btrace_insn_end (&end, btinfo);
2075
2076 if (btrace_insn_cmp (replay, &end) == 0)
2077 record_btrace_stop_replaying (tp);
2078}
2079
b2f4cfde
MM
2080/* The to_resume method of target record-btrace. */
2081
2082static void
2083record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2084 enum gdb_signal signal)
2085{
0ca912df 2086 struct thread_info *tp;
d2939ba2 2087 enum btrace_thread_flag flag, cflag;
52834460 2088
987e68b1
MM
2089 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2090 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2091 step ? "step" : "cont");
52834460 2092
0ca912df
MM
2093 /* Store the execution direction of the last resume.
2094
2095 If there is more than one to_resume call, we have to rely on infrun
2096 to not change the execution direction in-between. */
70ad5bff
MM
2097 record_btrace_resume_exec_dir = execution_direction;
2098
0ca912df 2099 /* As long as we're not replaying, just forward the request.
52834460 2100
0ca912df
MM
2101 For non-stop targets this means that no thread is replaying. In order to
2102 make progress, we may need to explicitly move replaying threads to the end
2103 of their execution history. */
a52eab48
MM
2104 if ((execution_direction != EXEC_REVERSE)
2105 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2106 {
e75fdfca 2107 ops = ops->beneath;
04c4fe8c
MM
2108 ops->to_resume (ops, ptid, step, signal);
2109 return;
b2f4cfde
MM
2110 }
2111
52834460 2112 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2113 if (execution_direction == EXEC_REVERSE)
2114 {
2115 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2116 cflag = BTHR_RCONT;
2117 }
52834460 2118 else
d2939ba2
MM
2119 {
2120 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2121 cflag = BTHR_CONT;
2122 }
52834460 2123
52834460 2124 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2125 record_btrace_wait below.
2126
2127 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2128 if (!target_is_non_stop_p ())
2129 {
2130 gdb_assert (ptid_match (inferior_ptid, ptid));
2131
2132 ALL_NON_EXITED_THREADS (tp)
2133 if (ptid_match (tp->ptid, ptid))
2134 {
2135 if (ptid_match (tp->ptid, inferior_ptid))
2136 record_btrace_resume_thread (tp, flag);
2137 else
2138 record_btrace_resume_thread (tp, cflag);
2139 }
2140 }
2141 else
2142 {
2143 ALL_NON_EXITED_THREADS (tp)
2144 if (ptid_match (tp->ptid, ptid))
2145 record_btrace_resume_thread (tp, flag);
2146 }
70ad5bff
MM
2147
2148 /* Async support. */
2149 if (target_can_async_p ())
2150 {
6a3753b3 2151 target_async (1);
70ad5bff
MM
2152 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2153 }
52834460
MM
2154}
2155
85ad3aaf
PA
2156/* The to_commit_resume method of target record-btrace. */
2157
2158static void
2159record_btrace_commit_resume (struct target_ops *ops)
2160{
2161 if ((execution_direction != EXEC_REVERSE)
2162 && !record_btrace_is_replaying (ops, minus_one_ptid))
2163 ops->beneath->to_commit_resume (ops->beneath);
2164}
2165
987e68b1
MM
2166/* Cancel resuming TP. */
2167
2168static void
2169record_btrace_cancel_resume (struct thread_info *tp)
2170{
2171 enum btrace_thread_flag flags;
2172
2173 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2174 if (flags == 0)
2175 return;
2176
43792cf0
PA
2177 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2178 print_thread_id (tp),
987e68b1
MM
2179 target_pid_to_str (tp->ptid), flags,
2180 btrace_thread_flag_to_str (flags));
2181
2182 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2183 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2184}
2185
2186/* Return a target_waitstatus indicating that we ran out of history. */
2187
2188static struct target_waitstatus
2189btrace_step_no_history (void)
2190{
2191 struct target_waitstatus status;
2192
2193 status.kind = TARGET_WAITKIND_NO_HISTORY;
2194
2195 return status;
2196}
2197
2198/* Return a target_waitstatus indicating that a step finished. */
2199
2200static struct target_waitstatus
2201btrace_step_stopped (void)
2202{
2203 struct target_waitstatus status;
2204
2205 status.kind = TARGET_WAITKIND_STOPPED;
2206 status.value.sig = GDB_SIGNAL_TRAP;
2207
2208 return status;
2209}
2210
6e4879f0
MM
2211/* Return a target_waitstatus indicating that a thread was stopped as
2212 requested. */
2213
2214static struct target_waitstatus
2215btrace_step_stopped_on_request (void)
2216{
2217 struct target_waitstatus status;
2218
2219 status.kind = TARGET_WAITKIND_STOPPED;
2220 status.value.sig = GDB_SIGNAL_0;
2221
2222 return status;
2223}
2224
d825d248
MM
2225/* Return a target_waitstatus indicating a spurious stop. */
2226
2227static struct target_waitstatus
2228btrace_step_spurious (void)
2229{
2230 struct target_waitstatus status;
2231
2232 status.kind = TARGET_WAITKIND_SPURIOUS;
2233
2234 return status;
2235}
2236
e3cfc1c7
MM
2237/* Return a target_waitstatus indicating that the thread was not resumed. */
2238
2239static struct target_waitstatus
2240btrace_step_no_resumed (void)
2241{
2242 struct target_waitstatus status;
2243
2244 status.kind = TARGET_WAITKIND_NO_RESUMED;
2245
2246 return status;
2247}
2248
2249/* Return a target_waitstatus indicating that we should wait again. */
2250
2251static struct target_waitstatus
2252btrace_step_again (void)
2253{
2254 struct target_waitstatus status;
2255
2256 status.kind = TARGET_WAITKIND_IGNORE;
2257
2258 return status;
2259}
2260
52834460
MM
2261/* Clear the record histories. */
2262
2263static void
2264record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2265{
2266 xfree (btinfo->insn_history);
2267 xfree (btinfo->call_history);
2268
2269 btinfo->insn_history = NULL;
2270 btinfo->call_history = NULL;
2271}
2272
3c615f99
MM
2273/* Check whether TP's current replay position is at a breakpoint. */
2274
2275static int
2276record_btrace_replay_at_breakpoint (struct thread_info *tp)
2277{
2278 struct btrace_insn_iterator *replay;
2279 struct btrace_thread_info *btinfo;
2280 const struct btrace_insn *insn;
2281 struct inferior *inf;
2282
2283 btinfo = &tp->btrace;
2284 replay = btinfo->replay;
2285
2286 if (replay == NULL)
2287 return 0;
2288
2289 insn = btrace_insn_get (replay);
2290 if (insn == NULL)
2291 return 0;
2292
2293 inf = find_inferior_ptid (tp->ptid);
2294 if (inf == NULL)
2295 return 0;
2296
2297 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2298 &btinfo->stop_reason);
2299}
2300
d825d248 2301/* Step one instruction in forward direction. */
52834460
MM
2302
2303static struct target_waitstatus
d825d248 2304record_btrace_single_step_forward (struct thread_info *tp)
52834460 2305{
b61ce85c 2306 struct btrace_insn_iterator *replay, end, start;
52834460 2307 struct btrace_thread_info *btinfo;
52834460 2308
d825d248
MM
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2311
2312 /* We're done if we're not replaying. */
2313 if (replay == NULL)
2314 return btrace_step_no_history ();
2315
011c71b6
MM
2316 /* Check if we're stepping a breakpoint. */
2317 if (record_btrace_replay_at_breakpoint (tp))
2318 return btrace_step_stopped ();
2319
b61ce85c
MM
2320 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2321 jump back to the instruction at which we started. */
2322 start = *replay;
d825d248
MM
2323 do
2324 {
2325 unsigned int steps;
2326
e3cfc1c7
MM
2327 /* We will bail out here if we continue stepping after reaching the end
2328 of the execution history. */
d825d248
MM
2329 steps = btrace_insn_next (replay, 1);
2330 if (steps == 0)
b61ce85c
MM
2331 {
2332 *replay = start;
2333 return btrace_step_no_history ();
2334 }
d825d248
MM
2335 }
2336 while (btrace_insn_get (replay) == NULL);
2337
2338 /* Determine the end of the instruction trace. */
2339 btrace_insn_end (&end, btinfo);
2340
e3cfc1c7
MM
2341 /* The execution trace contains (and ends with) the current instruction.
2342 This instruction has not been executed, yet, so the trace really ends
2343 one instruction earlier. */
d825d248 2344 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2345 return btrace_step_no_history ();
d825d248
MM
2346
2347 return btrace_step_spurious ();
2348}
2349
2350/* Step one instruction in backward direction. */
2351
2352static struct target_waitstatus
2353record_btrace_single_step_backward (struct thread_info *tp)
2354{
b61ce85c 2355 struct btrace_insn_iterator *replay, start;
d825d248 2356 struct btrace_thread_info *btinfo;
e59fa00f 2357
52834460
MM
2358 btinfo = &tp->btrace;
2359 replay = btinfo->replay;
2360
d825d248
MM
2361 /* Start replaying if we're not already doing so. */
2362 if (replay == NULL)
2363 replay = record_btrace_start_replaying (tp);
2364
2365 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2366 Skip gaps during replay. If we end up at a gap (at the beginning of
2367 the trace), jump back to the instruction at which we started. */
2368 start = *replay;
d825d248
MM
2369 do
2370 {
2371 unsigned int steps;
2372
2373 steps = btrace_insn_prev (replay, 1);
2374 if (steps == 0)
b61ce85c
MM
2375 {
2376 *replay = start;
2377 return btrace_step_no_history ();
2378 }
d825d248
MM
2379 }
2380 while (btrace_insn_get (replay) == NULL);
2381
011c71b6
MM
2382 /* Check if we're stepping a breakpoint.
2383
2384 For reverse-stepping, this check is after the step. There is logic in
2385 infrun.c that handles reverse-stepping separately. See, for example,
2386 proceed and adjust_pc_after_break.
2387
2388 This code assumes that for reverse-stepping, PC points to the last
2389 de-executed instruction, whereas for forward-stepping PC points to the
2390 next to-be-executed instruction. */
2391 if (record_btrace_replay_at_breakpoint (tp))
2392 return btrace_step_stopped ();
2393
d825d248
MM
2394 return btrace_step_spurious ();
2395}
2396
2397/* Step a single thread. */
2398
2399static struct target_waitstatus
2400record_btrace_step_thread (struct thread_info *tp)
2401{
2402 struct btrace_thread_info *btinfo;
2403 struct target_waitstatus status;
2404 enum btrace_thread_flag flags;
2405
2406 btinfo = &tp->btrace;
2407
6e4879f0
MM
2408 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2409 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2410
43792cf0 2411 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2412 target_pid_to_str (tp->ptid), flags,
2413 btrace_thread_flag_to_str (flags));
52834460 2414
6e4879f0
MM
2415 /* We can't step without an execution history. */
2416 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2417 return btrace_step_no_history ();
2418
52834460
MM
2419 switch (flags)
2420 {
2421 default:
2422 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2423
6e4879f0
MM
2424 case BTHR_STOP:
2425 return btrace_step_stopped_on_request ();
2426
52834460 2427 case BTHR_STEP:
d825d248
MM
2428 status = record_btrace_single_step_forward (tp);
2429 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2430 break;
52834460
MM
2431
2432 return btrace_step_stopped ();
2433
2434 case BTHR_RSTEP:
d825d248
MM
2435 status = record_btrace_single_step_backward (tp);
2436 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2437 break;
52834460
MM
2438
2439 return btrace_step_stopped ();
2440
2441 case BTHR_CONT:
e3cfc1c7
MM
2442 status = record_btrace_single_step_forward (tp);
2443 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2444 break;
52834460 2445
e3cfc1c7
MM
2446 btinfo->flags |= flags;
2447 return btrace_step_again ();
52834460
MM
2448
2449 case BTHR_RCONT:
e3cfc1c7
MM
2450 status = record_btrace_single_step_backward (tp);
2451 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2452 break;
52834460 2453
e3cfc1c7
MM
2454 btinfo->flags |= flags;
2455 return btrace_step_again ();
2456 }
d825d248 2457
e3cfc1c7
MM
2458 /* We keep threads moving at the end of their execution history. The to_wait
2459 method will stop the thread for whom the event is reported. */
2460 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2461 btinfo->flags |= flags;
52834460 2462
e3cfc1c7 2463 return status;
b2f4cfde
MM
2464}
2465
/* A vector of thread pointers, used as a work list while stepping.  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2470
a6b5be76
MM
2471/* Announce further events if necessary. */
2472
2473static void
2474record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2475 const VEC (tp_t) *no_history)
2476{
2477 int more_moving, more_no_history;
2478
2479 more_moving = !VEC_empty (tp_t, moving);
2480 more_no_history = !VEC_empty (tp_t, no_history);
2481
2482 if (!more_moving && !more_no_history)
2483 return;
2484
2485 if (more_moving)
2486 DEBUG ("movers pending");
2487
2488 if (more_no_history)
2489 DEBUG ("no-history pending");
2490
2491 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2492}
2493
b2f4cfde
MM
2494/* The to_wait method of target record-btrace. */
2495
2496static ptid_t
2497record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2498 struct target_waitstatus *status, int options)
2499{
e3cfc1c7
MM
2500 VEC (tp_t) *moving, *no_history;
2501 struct thread_info *tp, *eventing;
2502 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
52834460
MM
2503
2504 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2505
b2f4cfde 2506 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2507 if ((execution_direction != EXEC_REVERSE)
2508 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2509 {
e75fdfca
TT
2510 ops = ops->beneath;
2511 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2512 }
2513
e3cfc1c7
MM
2514 moving = NULL;
2515 no_history = NULL;
2516
2517 make_cleanup (VEC_cleanup (tp_t), &moving);
2518 make_cleanup (VEC_cleanup (tp_t), &no_history);
2519
2520 /* Keep a work list of moving threads. */
2521 ALL_NON_EXITED_THREADS (tp)
2522 if (ptid_match (tp->ptid, ptid)
2523 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2524 VEC_safe_push (tp_t, moving, tp);
2525
2526 if (VEC_empty (tp_t, moving))
52834460 2527 {
e3cfc1c7 2528 *status = btrace_step_no_resumed ();
52834460 2529
e3cfc1c7
MM
2530 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2531 target_waitstatus_to_string (status));
2532
2533 do_cleanups (cleanups);
2534 return null_ptid;
52834460
MM
2535 }
2536
e3cfc1c7
MM
2537 /* Step moving threads one by one, one step each, until either one thread
2538 reports an event or we run out of threads to step.
2539
2540 When stepping more than one thread, chances are that some threads reach
2541 the end of their execution history earlier than others. If we reported
2542 this immediately, all-stop on top of non-stop would stop all threads and
2543 resume the same threads next time. And we would report the same thread
2544 having reached the end of its execution history again.
2545
2546 In the worst case, this would starve the other threads. But even if other
2547 threads would be allowed to make progress, this would result in far too
2548 many intermediate stops.
2549
2550 We therefore delay the reporting of "no execution history" until we have
2551 nothing else to report. By this time, all threads should have moved to
2552 either the beginning or the end of their execution history. There will
2553 be a single user-visible stop. */
2554 eventing = NULL;
2555 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2556 {
2557 unsigned int ix;
2558
2559 ix = 0;
2560 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2561 {
2562 *status = record_btrace_step_thread (tp);
2563
2564 switch (status->kind)
2565 {
2566 case TARGET_WAITKIND_IGNORE:
2567 ix++;
2568 break;
2569
2570 case TARGET_WAITKIND_NO_HISTORY:
2571 VEC_safe_push (tp_t, no_history,
2572 VEC_ordered_remove (tp_t, moving, ix));
2573 break;
2574
2575 default:
2576 eventing = VEC_unordered_remove (tp_t, moving, ix);
2577 break;
2578 }
2579 }
2580 }
2581
2582 if (eventing == NULL)
2583 {
2584 /* We started with at least one moving thread. This thread must have
2585 either stopped or reached the end of its execution history.
2586
2587 In the former case, EVENTING must not be NULL.
2588 In the latter case, NO_HISTORY must not be empty. */
2589 gdb_assert (!VEC_empty (tp_t, no_history));
2590
2591 /* We kept threads moving at the end of their execution history. Stop
2592 EVENTING now that we are going to report its stop. */
2593 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2594 eventing->btrace.flags &= ~BTHR_MOVE;
2595
2596 *status = btrace_step_no_history ();
2597 }
2598
2599 gdb_assert (eventing != NULL);
2600
2601 /* We kept threads replaying at the end of their execution history. Stop
2602 replaying EVENTING now that we are going to report its stop. */
2603 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2604
2605 /* Stop all other threads. */
5953356c 2606 if (!target_is_non_stop_p ())
e3cfc1c7
MM
2607 ALL_NON_EXITED_THREADS (tp)
2608 record_btrace_cancel_resume (tp);
52834460 2609
a6b5be76
MM
2610 /* In async mode, we need to announce further events. */
2611 if (target_is_async_p ())
2612 record_btrace_maybe_mark_async_event (moving, no_history);
2613
52834460 2614 /* Start record histories anew from the current position. */
e3cfc1c7 2615 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2616
2617 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2618 registers_changed_ptid (eventing->ptid);
2619
43792cf0
PA
2620 DEBUG ("wait ended by thread %s (%s): %s",
2621 print_thread_id (eventing),
e3cfc1c7
MM
2622 target_pid_to_str (eventing->ptid),
2623 target_waitstatus_to_string (status));
52834460 2624
e3cfc1c7
MM
2625 do_cleanups (cleanups);
2626 return eventing->ptid;
52834460
MM
2627}
2628
6e4879f0
MM
2629/* The to_stop method of target record-btrace. */
2630
2631static void
2632record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2633{
2634 DEBUG ("stop %s", target_pid_to_str (ptid));
2635
2636 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2637 if ((execution_direction != EXEC_REVERSE)
2638 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2639 {
2640 ops = ops->beneath;
2641 ops->to_stop (ops, ptid);
2642 }
2643 else
2644 {
2645 struct thread_info *tp;
2646
2647 ALL_NON_EXITED_THREADS (tp)
2648 if (ptid_match (tp->ptid, ptid))
2649 {
2650 tp->btrace.flags &= ~BTHR_MOVE;
2651 tp->btrace.flags |= BTHR_STOP;
2652 }
2653 }
2654 }
2655
/* The to_can_execute_reverse method of target record-btrace.  Replaying a
   recorded trace always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2663
9e8915c6 2664/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2665
9e8915c6
PA
2666static int
2667record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2668{
a52eab48 2669 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2670 {
2671 struct thread_info *tp = inferior_thread ();
2672
2673 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2674 }
2675
2676 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2677}
2678
2679/* The to_supports_stopped_by_sw_breakpoint method of target
2680 record-btrace. */
2681
2682static int
2683record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2684{
a52eab48 2685 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2686 return 1;
2687
2688 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2689}
2690
2691/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2692
2693static int
2694record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2695{
a52eab48 2696 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2697 {
2698 struct thread_info *tp = inferior_thread ();
2699
2700 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2701 }
2702
2703 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2704}
2705
2706/* The to_supports_stopped_by_hw_breakpoint method of target
2707 record-btrace. */
2708
2709static int
2710record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2711{
a52eab48 2712 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2713 return 1;
52834460 2714
9e8915c6 2715 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2716}
2717
e8032dde 2718/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2719
2720static void
e8032dde 2721record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2722{
e8032dde 2723 /* We don't add or remove threads during replay. */
a52eab48 2724 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2725 return;
2726
2727 /* Forward the request. */
e75fdfca 2728 ops = ops->beneath;
e8032dde 2729 ops->to_update_thread_list (ops);
e2887aa3
MM
2730}
2731
2732/* The to_thread_alive method of target record-btrace. */
2733
2734static int
2735record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2736{
2737 /* We don't add or remove threads during replay. */
a52eab48 2738 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2739 return find_thread_ptid (ptid) != NULL;
2740
2741 /* Forward the request. */
e75fdfca
TT
2742 ops = ops->beneath;
2743 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2744}
2745
066ce621
MM
2746/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2747 is stopped. */
2748
2749static void
2750record_btrace_set_replay (struct thread_info *tp,
2751 const struct btrace_insn_iterator *it)
2752{
2753 struct btrace_thread_info *btinfo;
2754
2755 btinfo = &tp->btrace;
2756
2757 if (it == NULL || it->function == NULL)
52834460 2758 record_btrace_stop_replaying (tp);
066ce621
MM
2759 else
2760 {
2761 if (btinfo->replay == NULL)
52834460 2762 record_btrace_start_replaying (tp);
066ce621
MM
2763 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2764 return;
2765
2766 *btinfo->replay = *it;
52834460 2767 registers_changed_ptid (tp->ptid);
066ce621
MM
2768 }
2769
52834460
MM
2770 /* Start anew from the new replay position. */
2771 record_btrace_clear_histories (btinfo);
485668e5
MM
2772
2773 stop_pc = regcache_read_pc (get_current_regcache ());
2774 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2775}
2776
2777/* The to_goto_record_begin method of target record-btrace. */
2778
2779static void
08475817 2780record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2781{
2782 struct thread_info *tp;
2783 struct btrace_insn_iterator begin;
2784
2785 tp = require_btrace_thread ();
2786
2787 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2788
2789 /* Skip gaps at the beginning of the trace. */
2790 while (btrace_insn_get (&begin) == NULL)
2791 {
2792 unsigned int steps;
2793
2794 steps = btrace_insn_next (&begin, 1);
2795 if (steps == 0)
2796 error (_("No trace."));
2797 }
2798
066ce621 2799 record_btrace_set_replay (tp, &begin);
066ce621
MM
2800}
2801
2802/* The to_goto_record_end method of target record-btrace. */
2803
2804static void
307a1b91 2805record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2806{
2807 struct thread_info *tp;
2808
2809 tp = require_btrace_thread ();
2810
2811 record_btrace_set_replay (tp, NULL);
066ce621
MM
2812}
2813
2814/* The to_goto_record method of target record-btrace. */
2815
2816static void
606183ac 2817record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2818{
2819 struct thread_info *tp;
2820 struct btrace_insn_iterator it;
2821 unsigned int number;
2822 int found;
2823
2824 number = insn;
2825
2826 /* Check for wrap-arounds. */
2827 if (number != insn)
2828 error (_("Instruction number out of range."));
2829
2830 tp = require_btrace_thread ();
2831
2832 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2833 if (found == 0)
2834 error (_("No such instruction."));
2835
2836 record_btrace_set_replay (tp, &it);
066ce621
MM
2837}
2838
797094dd
MM
2839/* The to_record_stop_replaying method of target record-btrace. */
2840
2841static void
2842record_btrace_stop_replaying_all (struct target_ops *self)
2843{
2844 struct thread_info *tp;
2845
2846 ALL_NON_EXITED_THREADS (tp)
2847 record_btrace_stop_replaying (tp);
2848}
2849
70ad5bff
MM
2850/* The to_execution_direction target method. */
2851
2852static enum exec_direction_kind
2853record_btrace_execution_direction (struct target_ops *self)
2854{
2855 return record_btrace_resume_exec_dir;
2856}
2857
aef92902
MM
2858/* The to_prepare_to_generate_core target method. */
2859
2860static void
2861record_btrace_prepare_to_generate_core (struct target_ops *self)
2862{
2863 record_btrace_generating_corefile = 1;
2864}
2865
2866/* The to_done_generating_core target method. */
2867
2868static void
2869record_btrace_done_generating_core (struct target_ops *self)
2870{
2871 record_btrace_generating_corefile = 0;
2872}
2873
afedecd3
MM
2874/* Initialize the record-btrace target ops. */
2875
2876static void
2877init_record_btrace_ops (void)
2878{
2879 struct target_ops *ops;
2880
2881 ops = &record_btrace_ops;
2882 ops->to_shortname = "record-btrace";
2883 ops->to_longname = "Branch tracing target";
2884 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2885 ops->to_open = record_btrace_open;
2886 ops->to_close = record_btrace_close;
b7d2e916 2887 ops->to_async = record_btrace_async;
afedecd3 2888 ops->to_detach = record_detach;
c0272db5 2889 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2890 ops->to_mourn_inferior = record_mourn_inferior;
2891 ops->to_kill = record_kill;
afedecd3
MM
2892 ops->to_stop_recording = record_btrace_stop_recording;
2893 ops->to_info_record = record_btrace_info;
2894 ops->to_insn_history = record_btrace_insn_history;
2895 ops->to_insn_history_from = record_btrace_insn_history_from;
2896 ops->to_insn_history_range = record_btrace_insn_history_range;
2897 ops->to_call_history = record_btrace_call_history;
2898 ops->to_call_history_from = record_btrace_call_history_from;
2899 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 2900 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2901 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2902 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2903 ops->to_xfer_partial = record_btrace_xfer_partial;
2904 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2905 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2906 ops->to_fetch_registers = record_btrace_fetch_registers;
2907 ops->to_store_registers = record_btrace_store_registers;
2908 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2909 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2910 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2911 ops->to_resume = record_btrace_resume;
85ad3aaf 2912 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2913 ops->to_wait = record_btrace_wait;
6e4879f0 2914 ops->to_stop = record_btrace_stop;
e8032dde 2915 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2916 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2917 ops->to_goto_record_begin = record_btrace_goto_begin;
2918 ops->to_goto_record_end = record_btrace_goto_end;
2919 ops->to_goto_record = record_btrace_goto;
52834460 2920 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2921 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2922 ops->to_supports_stopped_by_sw_breakpoint
2923 = record_btrace_supports_stopped_by_sw_breakpoint;
2924 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2925 ops->to_supports_stopped_by_hw_breakpoint
2926 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2927 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2928 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2929 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2930 ops->to_stratum = record_stratum;
2931 ops->to_magic = OPS_MAGIC;
2932}
2933
f4abbc16
MM
2934/* Start recording in BTS format. */
2935
2936static void
2937cmd_record_btrace_bts_start (char *args, int from_tty)
2938{
f4abbc16
MM
2939 if (args != NULL && *args != 0)
2940 error (_("Invalid argument."));
2941
2942 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2943
492d29ea
PA
2944 TRY
2945 {
2946 execute_command ("target record-btrace", from_tty);
2947 }
2948 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2949 {
2950 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2951 throw_exception (exception);
2952 }
492d29ea 2953 END_CATCH
f4abbc16
MM
2954}
2955
bc504a31 2956/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2957
2958static void
b20a6524 2959cmd_record_btrace_pt_start (char *args, int from_tty)
afedecd3
MM
2960{
2961 if (args != NULL && *args != 0)
2962 error (_("Invalid argument."));
2963
b20a6524 2964 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2965
492d29ea
PA
2966 TRY
2967 {
2968 execute_command ("target record-btrace", from_tty);
2969 }
2970 CATCH (exception, RETURN_MASK_ALL)
2971 {
2972 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2973 throw_exception (exception);
2974 }
2975 END_CATCH
afedecd3
MM
2976}
2977
b20a6524
MM
2978/* Alias for "target record". */
2979
2980static void
2981cmd_record_btrace_start (char *args, int from_tty)
2982{
2983 if (args != NULL && *args != 0)
2984 error (_("Invalid argument."));
2985
2986 record_btrace_conf.format = BTRACE_FORMAT_PT;
2987
2988 TRY
2989 {
2990 execute_command ("target record-btrace", from_tty);
2991 }
2992 CATCH (exception, RETURN_MASK_ALL)
2993 {
2994 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2995
2996 TRY
2997 {
2998 execute_command ("target record-btrace", from_tty);
2999 }
3000 CATCH (exception, RETURN_MASK_ALL)
3001 {
3002 record_btrace_conf.format = BTRACE_FORMAT_NONE;
3003 throw_exception (exception);
3004 }
3005 END_CATCH
3006 }
3007 END_CATCH
3008}
3009
67b5c0c1
MM
3010/* The "set record btrace" command. */
3011
3012static void
3013cmd_set_record_btrace (char *args, int from_tty)
3014{
3015 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
3016}
3017
3018/* The "show record btrace" command. */
3019
3020static void
3021cmd_show_record_btrace (char *args, int from_tty)
3022{
3023 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
3024}
3025
3026/* The "show record btrace replay-memory-access" command. */
3027
3028static void
3029cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
3030 struct cmd_list_element *c, const char *value)
3031{
3032 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
3033 replay_memory_access);
3034}
3035
d33501a5
MM
3036/* The "set record btrace bts" command. */
3037
3038static void
3039cmd_set_record_btrace_bts (char *args, int from_tty)
3040{
3041 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3042 "by an appropriate subcommand.\n"));
d33501a5
MM
3043 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3044 all_commands, gdb_stdout);
3045}
3046
3047/* The "show record btrace bts" command. */
3048
3049static void
3050cmd_show_record_btrace_bts (char *args, int from_tty)
3051{
3052 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3053}
3054
b20a6524
MM
3055/* The "set record btrace pt" command. */
3056
3057static void
3058cmd_set_record_btrace_pt (char *args, int from_tty)
3059{
3060 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3061 "by an appropriate subcommand.\n"));
3062 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3063 all_commands, gdb_stdout);
3064}
3065
3066/* The "show record btrace pt" command. */
3067
3068static void
3069cmd_show_record_btrace_pt (char *args, int from_tty)
3070{
3071 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3072}
3073
/* The "record bts buffer-size" show value function.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3084
/* The "record pt buffer-size" show value function.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3095
afedecd3
MM
3096void _initialize_record_btrace (void);
3097
3098/* Initialize btrace commands. */
3099
3100void
3101_initialize_record_btrace (void)
3102{
f4abbc16
MM
3103 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3104 _("Start branch trace recording."), &record_btrace_cmdlist,
3105 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3106 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3107
f4abbc16
MM
3108 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3109 _("\
3110Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3111The processor stores a from/to record for each branch into a cyclic buffer.\n\
3112This format may not be available on all processors."),
3113 &record_btrace_cmdlist);
3114 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3115
b20a6524
MM
3116 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3117 _("\
bc504a31 3118Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3119This format may not be available on all processors."),
3120 &record_btrace_cmdlist);
3121 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3122
67b5c0c1
MM
3123 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3124 _("Set record options"), &set_record_btrace_cmdlist,
3125 "set record btrace ", 0, &set_record_cmdlist);
3126
3127 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3128 _("Show record options"), &show_record_btrace_cmdlist,
3129 "show record btrace ", 0, &show_record_cmdlist);
3130
3131 add_setshow_enum_cmd ("replay-memory-access", no_class,
3132 replay_memory_access_types, &replay_memory_access, _("\
3133Set what memory accesses are allowed during replay."), _("\
3134Show what memory accesses are allowed during replay."),
3135 _("Default is READ-ONLY.\n\n\
3136The btrace record target does not trace data.\n\
3137The memory therefore corresponds to the live target and not \
3138to the current replay position.\n\n\
3139When READ-ONLY, allow accesses to read-only memory during replay.\n\
3140When READ-WRITE, allow accesses to read-only and read-write memory during \
3141replay."),
3142 NULL, cmd_show_replay_memory_access,
3143 &set_record_btrace_cmdlist,
3144 &show_record_btrace_cmdlist);
3145
d33501a5
MM
3146 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3147 _("Set record btrace bts options"),
3148 &set_record_btrace_bts_cmdlist,
3149 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3150
3151 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3152 _("Show record btrace bts options"),
3153 &show_record_btrace_bts_cmdlist,
3154 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3155
3156 add_setshow_uinteger_cmd ("buffer-size", no_class,
3157 &record_btrace_conf.bts.size,
3158 _("Set the record/replay bts buffer size."),
3159 _("Show the record/replay bts buffer size."), _("\
3160When starting recording request a trace buffer of this size. \
3161The actual buffer size may differ from the requested size. \
3162Use \"info record\" to see the actual buffer size.\n\n\
3163Bigger buffers allow longer recording but also take more time to process \
3164the recorded execution trace.\n\n\
b20a6524
MM
3165The trace buffer size may not be changed while recording."), NULL,
3166 show_record_bts_buffer_size_value,
d33501a5
MM
3167 &set_record_btrace_bts_cmdlist,
3168 &show_record_btrace_bts_cmdlist);
3169
b20a6524
MM
3170 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3171 _("Set record btrace pt options"),
3172 &set_record_btrace_pt_cmdlist,
3173 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3174
3175 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3176 _("Show record btrace pt options"),
3177 &show_record_btrace_pt_cmdlist,
3178 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3179
3180 add_setshow_uinteger_cmd ("buffer-size", no_class,
3181 &record_btrace_conf.pt.size,
3182 _("Set the record/replay pt buffer size."),
3183 _("Show the record/replay pt buffer size."), _("\
3184Bigger buffers allow longer recording but also take more time to process \
3185the recorded execution.\n\
3186The actual buffer size may differ from the requested size. Use \"info record\" \
3187to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3188 &set_record_btrace_pt_cmdlist,
3189 &show_record_btrace_pt_cmdlist);
3190
afedecd3
MM
3191 init_record_btrace_ops ();
3192 add_target (&record_btrace_ops);
0b722aec
MM
3193
3194 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3195 xcalloc, xfree);
d33501a5
MM
3196
3197 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3198 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3199}