]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
Centralize thread ID printing
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
618f726f 3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
afedecd3
MM
29#include "cli/cli-utils.h"
30#include "source.h"
31#include "ui-out.h"
32#include "symtab.h"
33#include "filenames.h"
1f3ef581 34#include "regcache.h"
cecac1ab 35#include "frame-unwind.h"
0b722aec 36#include "hashtab.h"
45741a9c 37#include "infrun.h"
70ad5bff
MM
38#include "event-loop.h"
39#include "inf-loop.h"
e3cfc1c7 40#include "vec.h"
afedecd3
MM
41
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.
   NULL while automatic tracing of new threads is disabled.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
afedecd3
MM
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
99
100
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Refresh the trace before checking for emptiness so we report on the
     latest recorded data.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
125
126/* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132static struct btrace_thread_info *
133require_btrace (void)
134{
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
afedecd3
MM
140}
141
/* Enable branch tracing for one thread.  Warn on errors.

   Used as a new-thread observer: enabling is best-effort, so a failure is
   reported as a warning instead of aborting whatever triggered the thread
   notification.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
157
/* Callback function to disable branch tracing for one thread.

   ARG is really a struct thread_info *; the void * signature matches the
   cleanup callback interface.  */

static void
record_btrace_disable_callback (void *arg)
{
  btrace_disable ((struct thread_info *) arg);
}
167
/* Enable automatic tracing of new threads.

   Stores the observer handle so record_btrace_auto_disable can detach it
   again later.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
178
179/* Disable automatic tracing of new threads. */
180
181static void
182record_btrace_auto_disable (void)
183{
184 /* The observer may have been detached, already. */
185 if (record_btrace_thread_observer == NULL)
186 return;
187
188 DEBUG ("detach thread observer");
189
190 observer_detach_new_thread (record_btrace_thread_observer);
191 record_btrace_thread_observer = NULL;
192}
193
70ad5bff
MM
/* The record-btrace async event handler function.

   Forwards the event to the generic inferior event handler; DATA is
   unused.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
201
afedecd3
MM
/* The to_open method of target record-btrace.

   ARGS is an optional thread number list restricting which threads get
   traced; an empty list means all non-exited threads.  Throws if the
   inferior is not running.  On error, tracing is rolled back for every
   thread enabled so far via the cleanup chain.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Each successfully enabled thread gets a cleanup so a later failure
     disables tracing again for all of them.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  /* Success: keep tracing enabled for the selected threads.  */
  discard_cleanups (disable_chain);
}
241
242/* The to_stop_recording method of target record-btrace. */
243
244static void
c6cd7c02 245record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
246{
247 struct thread_info *tp;
248
249 DEBUG ("stop recording");
250
251 record_btrace_auto_disable ();
252
034f788c 253 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
254 if (tp->btrace.target != NULL)
255 btrace_disable (tp);
256}
257
/* The to_close method of target record-btrace.

   Releases the async event handler and tears down any remaining per-thread
   trace state.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
277
b7d2e916
PA
278/* The to_async method of target record-btrace. */
279
280static void
6a3753b3 281record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 282{
6a3753b3 283 if (enable)
b7d2e916
PA
284 mark_async_event_handler (record_btrace_async_inferior_event_handler);
285 else
286 clear_async_event_handler (record_btrace_async_inferior_event_handler);
287
6a3753b3 288 ops->beneath->to_async (ops->beneath, enable);
b7d2e916
PA
289}
290
d33501a5
MM
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of 1GB, 1MB, or 1kB, scale it down to that
   unit and return the matching suffix; otherwise leave *SIZE untouched and
   return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  /* Largest unit first: every multiple of 1GB is also a multiple of the
     smaller units, so order matters.  */
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } scale[] = { { 30, "GB" }, { 20, "MB" }, { 10, "kB" } };
  unsigned int i;

  for (i = 0; i < sizeof (scale) / sizeof (scale[0]); ++i)
    if ((*size & ((1u << scale[i].shift) - 1)) == 0)
      {
	*size >>= scale[i].shift;
	return scale[i].suffix;
      }

  return "";
}
318
319/* Print a BTS configuration. */
320
321static void
322record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
323{
324 const char *suffix;
325 unsigned int size;
326
327 size = conf->size;
328 if (size > 0)
329 {
330 suffix = record_btrace_adjust_size (&size);
331 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
332 }
333}
334
bc504a31 335/* Print an Intel Processor Trace configuration. */
b20a6524
MM
336
337static void
338record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
339{
340 const char *suffix;
341 unsigned int size;
342
343 size = conf->size;
344 if (size > 0)
345 {
346 suffix = record_btrace_adjust_size (&size);
347 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
348 }
349}
350
d33501a5
MM
351/* Print a branch tracing configuration. */
352
353static void
354record_btrace_print_conf (const struct btrace_config *conf)
355{
356 printf_unfiltered (_("Recording format: %s.\n"),
357 btrace_format_string (conf->format));
358
359 switch (conf->format)
360 {
361 case BTRACE_FORMAT_NONE:
362 return;
363
364 case BTRACE_FORMAT_BTS:
365 record_btrace_print_bts_conf (&conf->bts);
366 return;
b20a6524
MM
367
368 case BTRACE_FORMAT_PT:
369 record_btrace_print_pt_conf (&conf->pt);
370 return;
d33501a5
MM
371 }
372
373 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
374}
375
afedecd3
MM
/* The to_info_record method of target record-btrace.

   Prints the recording configuration, the number of recorded instructions,
   functions, and gaps for the current thread, and the replay position if we
   are replaying.  Throws if there is no current thread.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call is the call count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end: step backwards until we find an
	     instruction with a non-zero number or run out of trace.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
448
31fd9caa
MM
/* Print a decode error.

   ERRCODE is interpreted according to FORMAT.  Errors are printed as
   "[decode error (N): <text>]"; non-error conditions (e.g. a PT overflow)
   print just "[<text>]".  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes are libipt errors; let libipt format them.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
520
afedecd3
MM
/* Print an unsigned int.  Convenience wrapper for ui_out_field_fmt with a
   "%u" format.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
528
f94cc897
MM
/* A range of source lines.  The range is half-open: [begin, end).  A range
   with end <= begin is considered empty (see btrace_line_range_is_empty).  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};
542
/* Construct a line range.  Passing BEGIN == END == 0 yields an empty
   range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}
556
557/* Add a line to a line range. */
558
559static struct btrace_line_range
560btrace_line_range_add (struct btrace_line_range range, int line)
561{
562 if (range.end <= range.begin)
563 {
564 /* This is the first entry. */
565 range.begin = line;
566 range.end = line + 1;
567 }
568 else if (line < range.begin)
569 range.begin = line;
570 else if (range.end < line)
571 range.end = line;
572
573 return range;
574}
575
576/* Return non-zero if RANGE is empty, zero otherwise. */
577
578static int
579btrace_line_range_is_empty (struct btrace_line_range range)
580{
581 return range.end <= range.begin;
582}
583
584/* Return non-zero if LHS contains RHS, zero otherwise. */
585
586static int
587btrace_line_range_contains_range (struct btrace_line_range lhs,
588 struct btrace_line_range rhs)
589{
590 return ((lhs.symtab == rhs.symtab)
591 && (lhs.begin <= rhs.begin)
592 && (rhs.end <= lhs.end));
593}
594
/* Find the line range associated with PC.

   Collects all line table entries whose address equals PC into one range.
   Returns an empty range (possibly with a NULL symtab) if PC cannot be
   mapped to any source line.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  /* NOTE(review): the loop stops at nlines - 1, skipping the final line
     table entry — presumably the end-of-sequence marker; confirm against
     the linetable layout.  */
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}
628
/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain for
   the new source line.  If the source line range in LINES is not empty, this
   function will leave the cleanup chain for the last printed source line open
   so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      /* Close the tuple/list for the previous line before starting a new
	 one.  */
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}
662
afedecd3
MM
/* Disassemble a section of the recorded instruction trace.

   Prints the half-open iterator range [BEGIN, END) to UIOUT, interleaving
   source lines when DISASSEMBLY_SOURCE is set in FLAGS and reporting decode
   errors for gaps in the trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct ui_file *stb;
  struct cleanup *cleanups, *ui_item_chain;
  struct disassemble_info di;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  /* Always mark speculatively executed instructions.  */
  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  stb = mem_fileopen ();
  cleanups = make_cleanup_ui_file_delete (stb);
  di = gdb_disassemble_info (gdbarch, stb);
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  /* New source lines: print them and keep the tuple open
		     for the following instructions.  */
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
	}
    }

  do_cleanups (cleanups);
}
754
/* The to_insn_history method of target record-btrace.

   SIZE is the number of instructions to print; its sign selects the
   direction.  Without a previous history, printing starts at the replay
   position (or the trace tail) and expands in the requested direction,
   using the opposite direction to fill any remaining context.  With a
   previous history, printing continues from its begin/end.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      /* Continue from the previous history window.  */
      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
840
841/* The to_insn_history_range method of target record-btrace. */
842
843static void
4e99c6b7
TT
844record_btrace_insn_history_range (struct target_ops *self,
845 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
846{
847 struct btrace_thread_info *btinfo;
23a7fe75
MM
848 struct btrace_insn_history *history;
849 struct btrace_insn_iterator begin, end;
afedecd3
MM
850 struct cleanup *uiout_cleanup;
851 struct ui_out *uiout;
23a7fe75
MM
852 unsigned int low, high;
853 int found;
afedecd3
MM
854
855 uiout = current_uiout;
856 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
857 "insn history");
23a7fe75
MM
858 low = from;
859 high = to;
afedecd3 860
23a7fe75 861 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
862
863 /* Check for wrap-arounds. */
23a7fe75 864 if (low != from || high != to)
afedecd3
MM
865 error (_("Bad range."));
866
0688d04e 867 if (high < low)
afedecd3
MM
868 error (_("Bad range."));
869
23a7fe75 870 btinfo = require_btrace ();
afedecd3 871
23a7fe75
MM
872 found = btrace_find_insn_by_number (&begin, btinfo, low);
873 if (found == 0)
874 error (_("Range out of bounds."));
afedecd3 875
23a7fe75
MM
876 found = btrace_find_insn_by_number (&end, btinfo, high);
877 if (found == 0)
0688d04e
MM
878 {
879 /* Silently truncate the range. */
880 btrace_insn_end (&end, btinfo);
881 }
882 else
883 {
884 /* We want both begin and end to be inclusive. */
885 btrace_insn_next (&end, 1);
886 }
afedecd3 887
31fd9caa 888 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 889 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
890
891 do_cleanups (uiout_cleanup);
892}
893
/* The to_insn_history_from method of target record-btrace.

   Computes an inclusive [begin, end] range of SIZE instructions anchored at
   FROM (ending at FROM for negative SIZE, starting at FROM otherwise) and
   delegates to record_btrace_insn_history_range.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at zero instead of wrapping below the start.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
927
928/* Print the instruction number range for a function call history line. */
929
930static void
23a7fe75
MM
931btrace_call_history_insn_range (struct ui_out *uiout,
932 const struct btrace_function *bfun)
afedecd3 933{
7acbe133
MM
934 unsigned int begin, end, size;
935
936 size = VEC_length (btrace_insn_s, bfun->insn);
937 gdb_assert (size > 0);
afedecd3 938
23a7fe75 939 begin = bfun->insn_offset;
7acbe133 940 end = begin + size - 1;
afedecd3 941
23a7fe75 942 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 943 ui_out_text (uiout, ",");
23a7fe75 944 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
945}
946
ce0dfbea
MM
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   If no instruction maps to BFUN's symtab, *PBEGIN is INT_MAX and *PEND is
   INT_MIN, i.e. *PEND < *PBEGIN signals "no line info" to the caller.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining/macro expansion) and
	 entries without line information.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
987
afedecd3
MM
/* Print the source line information for a function call history line.

   Prints "file:line" or "file:min,max" for BFUN; prints nothing when BFUN
   has no symbol or no usable line range.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  /* end < begin means no instruction mapped to a source line.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
1017
0b722aec
MM
1018/* Get the name of a branch trace function. */
1019
1020static const char *
1021btrace_get_bfun_name (const struct btrace_function *bfun)
1022{
1023 struct minimal_symbol *msym;
1024 struct symbol *sym;
1025
1026 if (bfun == NULL)
1027 return "??";
1028
1029 msym = bfun->msym;
1030 sym = bfun->sym;
1031
1032 if (sym != NULL)
1033 return SYMBOL_PRINT_NAME (sym);
1034 else if (msym != NULL)
efd66ac6 1035 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
1036 else
1037 return "??";
1038}
1039
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints one line per function in [BEGIN, END): index, optional call-depth
   indentation, function name, and — depending on INT_FLAGS — the
   instruction number range and source line information.  Gaps in the trace
   are reported as decode errors.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  /* Indent proportionally to the call depth.  */
	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
1114
/* The to_call_history method of target record-btrace.

   Print SIZE entries of the function-call history, continuing from the
   previous browsing position if there is one, otherwise starting at the
   replay position (or the tail of the trace).  A negative SIZE browses
   backwards.  INT_FLAGS is a record_print_flags mask passed as int.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  /* NOTE(review): the tuple is named "insn history" although this prints
     the call history - presumably kept this way for MI compatibility;
     confirm before renaming.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from where the previous request stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next browsing request.  */
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1204
1205/* The to_call_history_range method of target record-btrace. */
1206
1207static void
f0d960ea 1208record_btrace_call_history_range (struct target_ops *self,
8d297bbf
PA
1209 ULONGEST from, ULONGEST to,
1210 int int_flags)
afedecd3
MM
1211{
1212 struct btrace_thread_info *btinfo;
23a7fe75
MM
1213 struct btrace_call_history *history;
1214 struct btrace_call_iterator begin, end;
afedecd3
MM
1215 struct cleanup *uiout_cleanup;
1216 struct ui_out *uiout;
23a7fe75
MM
1217 unsigned int low, high;
1218 int found;
8d297bbf 1219 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3
MM
1220
1221 uiout = current_uiout;
1222 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1223 "func history");
23a7fe75
MM
1224 low = from;
1225 high = to;
afedecd3 1226
8d297bbf 1227 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
afedecd3
MM
1228
1229 /* Check for wrap-arounds. */
23a7fe75 1230 if (low != from || high != to)
afedecd3
MM
1231 error (_("Bad range."));
1232
0688d04e 1233 if (high < low)
afedecd3
MM
1234 error (_("Bad range."));
1235
23a7fe75 1236 btinfo = require_btrace ();
afedecd3 1237
23a7fe75
MM
1238 found = btrace_find_call_by_number (&begin, btinfo, low);
1239 if (found == 0)
1240 error (_("Range out of bounds."));
afedecd3 1241
23a7fe75
MM
1242 found = btrace_find_call_by_number (&end, btinfo, high);
1243 if (found == 0)
0688d04e
MM
1244 {
1245 /* Silently truncate the range. */
1246 btrace_call_end (&end, btinfo);
1247 }
1248 else
1249 {
1250 /* We want both begin and end to be inclusive. */
1251 btrace_call_next (&end, 1);
1252 }
afedecd3 1253
8710b709 1254 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1255 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
1256
1257 do_cleanups (uiout_cleanup);
1258}
1259
/* The to_call_history_from method of target record-btrace.

   Print SIZE entries of the function-call history starting at call
   number FROM.  A negative SIZE prints the entries ending at FROM.
   The request is clamped at 0 and ULONGEST_MAX and then delegated to
   record_btrace_call_history_range.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at zero instead of wrapping around.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
1295
07bbe694
MM
1296/* The to_record_is_replaying method of target record-btrace. */
1297
1298static int
a52eab48 1299record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
07bbe694
MM
1300{
1301 struct thread_info *tp;
1302
034f788c 1303 ALL_NON_EXITED_THREADS (tp)
a52eab48 1304 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
07bbe694
MM
1305 return 1;
1306
1307 return 0;
1308}
1309
7ff27e9b
MM
1310/* The to_record_will_replay method of target record-btrace. */
1311
1312static int
1313record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1314{
1315 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1316}
1317
633785ff
MM
1318/* The to_xfer_partial method of target record-btrace. */
1319
9b409511 1320static enum target_xfer_status
633785ff
MM
1321record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1322 const char *annex, gdb_byte *readbuf,
1323 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1324 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
1325{
1326 struct target_ops *t;
1327
1328 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1329 if (replay_memory_access == replay_memory_access_read_only
aef92902 1330 && !record_btrace_generating_corefile
4d10e986 1331 && record_btrace_is_replaying (ops, inferior_ptid))
633785ff
MM
1332 {
1333 switch (object)
1334 {
1335 case TARGET_OBJECT_MEMORY:
1336 {
1337 struct target_section *section;
1338
1339 /* We do not allow writing memory in general. */
1340 if (writebuf != NULL)
9b409511
YQ
1341 {
1342 *xfered_len = len;
bc113b4e 1343 return TARGET_XFER_UNAVAILABLE;
9b409511 1344 }
633785ff
MM
1345
1346 /* We allow reading readonly memory. */
1347 section = target_section_by_addr (ops, offset);
1348 if (section != NULL)
1349 {
1350 /* Check if the section we found is readonly. */
1351 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1352 section->the_bfd_section)
1353 & SEC_READONLY) != 0)
1354 {
1355 /* Truncate the request to fit into this section. */
1356 len = min (len, section->endaddr - offset);
1357 break;
1358 }
1359 }
1360
9b409511 1361 *xfered_len = len;
bc113b4e 1362 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
1363 }
1364 }
1365 }
1366
1367 /* Forward the request. */
e75fdfca
TT
1368 ops = ops->beneath;
1369 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1370 offset, len, xfered_len);
633785ff
MM
1371}
1372
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily switch the replay memory-access mode to read-write so
   the breakpoint can be written, delegate to the target beneath, and
   restore the previous mode - also on the exception path.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1403
/* The to_remove_breakpoint method of target record-btrace.

   Mirror image of record_btrace_insert_breakpoint: temporarily allow
   read-write replay memory access, delegate to the target beneath, and
   restore the previous mode on both the normal and exception paths.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1434
1f3ef581
MM
1435/* The to_fetch_registers method of target record-btrace. */
1436
1437static void
1438record_btrace_fetch_registers (struct target_ops *ops,
1439 struct regcache *regcache, int regno)
1440{
1441 struct btrace_insn_iterator *replay;
1442 struct thread_info *tp;
1443
1444 tp = find_thread_ptid (inferior_ptid);
1445 gdb_assert (tp != NULL);
1446
1447 replay = tp->btrace.replay;
aef92902 1448 if (replay != NULL && !record_btrace_generating_corefile)
1f3ef581
MM
1449 {
1450 const struct btrace_insn *insn;
1451 struct gdbarch *gdbarch;
1452 int pcreg;
1453
1454 gdbarch = get_regcache_arch (regcache);
1455 pcreg = gdbarch_pc_regnum (gdbarch);
1456 if (pcreg < 0)
1457 return;
1458
1459 /* We can only provide the PC register. */
1460 if (regno >= 0 && regno != pcreg)
1461 return;
1462
1463 insn = btrace_insn_get (replay);
1464 gdb_assert (insn != NULL);
1465
1466 regcache_raw_supply (regcache, regno, &insn->pc);
1467 }
1468 else
1469 {
e75fdfca 1470 struct target_ops *t = ops->beneath;
1f3ef581 1471
e75fdfca 1472 t->to_fetch_registers (t, regcache, regno);
1f3ef581
MM
1473 }
1474}
1475
1476/* The to_store_registers method of target record-btrace. */
1477
1478static void
1479record_btrace_store_registers (struct target_ops *ops,
1480 struct regcache *regcache, int regno)
1481{
1482 struct target_ops *t;
1483
a52eab48 1484 if (!record_btrace_generating_corefile
4d10e986
MM
1485 && record_btrace_is_replaying (ops, inferior_ptid))
1486 error (_("Cannot write registers while replaying."));
1f3ef581
MM
1487
1488 gdb_assert (may_write_registers != 0);
1489
e75fdfca
TT
1490 t = ops->beneath;
1491 t->to_store_registers (t, regcache, regno);
1f3ef581
MM
1492}
1493
1494/* The to_prepare_to_store method of target record-btrace. */
1495
1496static void
1497record_btrace_prepare_to_store (struct target_ops *ops,
1498 struct regcache *regcache)
1499{
1500 struct target_ops *t;
1501
a52eab48 1502 if (!record_btrace_generating_corefile
4d10e986 1503 && record_btrace_is_replaying (ops, inferior_ptid))
1f3ef581
MM
1504 return;
1505
e75fdfca
TT
1506 t = ops->beneath;
1507 t->to_prepare_to_store (t, regcache);
1f3ef581
MM
1508}
1509
/* The branch trace frame cache.

   One instance is allocated per btrace frame (on the frame obstack) and
   registered in the BFCACHE hash table, keyed by FRAME.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1523
/* A struct btrace_frame_cache hash table indexed by NEXT.
   (Entries are hashed and compared by their FRAME pointer; see
   bfcache_hash and bfcache_eq.)  */

static htab_t bfcache;
1527
1528/* hash_f for htab_create_alloc of bfcache. */
1529
1530static hashval_t
1531bfcache_hash (const void *arg)
1532{
19ba03f4
SM
1533 const struct btrace_frame_cache *cache
1534 = (const struct btrace_frame_cache *) arg;
0b722aec
MM
1535
1536 return htab_hash_pointer (cache->frame);
1537}
1538
1539/* eq_f for htab_create_alloc of bfcache. */
1540
1541static int
1542bfcache_eq (const void *arg1, const void *arg2)
1543{
19ba03f4
SM
1544 const struct btrace_frame_cache *cache1
1545 = (const struct btrace_frame_cache *) arg1;
1546 const struct btrace_frame_cache *cache2
1547 = (const struct btrace_frame_cache *) arg2;
0b722aec
MM
1548
1549 return cache1->frame == cache2->frame;
1550}
1551
1552/* Create a new btrace frame cache. */
1553
1554static struct btrace_frame_cache *
1555bfcache_new (struct frame_info *frame)
1556{
1557 struct btrace_frame_cache *cache;
1558 void **slot;
1559
1560 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1561 cache->frame = frame;
1562
1563 slot = htab_find_slot (bfcache, cache, INSERT);
1564 gdb_assert (*slot == NULL);
1565 *slot = cache;
1566
1567 return cache;
1568}
1569
1570/* Extract the branch trace function from a branch trace frame. */
1571
1572static const struct btrace_function *
1573btrace_get_frame_function (struct frame_info *frame)
1574{
1575 const struct btrace_frame_cache *cache;
1576 const struct btrace_function *bfun;
1577 struct btrace_frame_cache pattern;
1578 void **slot;
1579
1580 pattern.frame = frame;
1581
1582 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1583 if (slot == NULL)
1584 return NULL;
1585
19ba03f4 1586 cache = (const struct btrace_frame_cache *) *slot;
0b722aec
MM
1587 return cache->bfun;
1588}
1589
/* Implement stop_reason method for record_btrace_frame_unwind.

   Unwinding stops (UNWIND_UNAVAILABLE) when the function segment has
   no known caller.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}
1608
/* Implement this_id method for record_btrace_frame_unwind.

   The id is built without stack information (the stack contents do not
   match the replayed PC); the function segment number of the first
   segment serves as the special address.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so the id stays
     stable across segment splits.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1637
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC register can be unwound.  For a caller reached via a
   return, the PC is the first instruction of the caller segment; for
   a caller reached via a call, it is the instruction following the
   caller's last traced instruction.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The caller resumes at its first traced instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The caller resumes after the call instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1686
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the innermost frame while replaying (using the replay
   position's function segment) and any frame whose next frame is a
   btrace frame reached via a non-tailcall link.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: derive it from the callee's up link, unless the
	 callee was entered via a tail call (handled by the tailcall
	 sniffer below).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1736
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims a frame only when the next (inner) frame is a btrace frame
   whose function segment was entered via a tail call.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1774
/* Implement dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind.  Remove THIS_CACHE from the
   BFCACHE hash table; the entry itself lives on the frame obstack and
   needs no explicit deallocation.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  /* The entry must still be registered.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1788
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1805
/* Like record_btrace_frame_unwind, but for frames entered via a tail
   call; only the sniffer differs.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1816
ac01945b
TT
/* Implement the to_get_unwinder method.  Returns the btrace unwinder
   for normal frames.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1824
/* Implement the to_get_tailcall_unwinder method.  Returns the btrace
   unwinder for tail-call frames.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1832
987e68b1
MM
1833/* Return a human-readable string for FLAG. */
1834
1835static const char *
1836btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1837{
1838 switch (flag)
1839 {
1840 case BTHR_STEP:
1841 return "step";
1842
1843 case BTHR_RSTEP:
1844 return "reverse-step";
1845
1846 case BTHR_CONT:
1847 return "cont";
1848
1849 case BTHR_RCONT:
1850 return "reverse-cont";
1851
1852 case BTHR_STOP:
1853 return "stop";
1854 }
1855
1856 return "<invalid>";
1857}
1858
/* Indicate that TP should be resumed according to FLAG.

   This only records the intent in TP's btrace flags; the actual
   stepping happens later in record_btrace_wait.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}
1879
/* Get the current frame for TP.

   Temporarily switches INFERIOR_PTID to TP and clears its executing
   flag so the frame can be computed; both are restored on the normal
   and exception paths.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}
1928
/* Start replaying a thread.

   Positions a new replay iterator at the end of TP's trace (skipping
   trailing gaps), installs it in TP's btrace info, and fixes up the
   stepping-related frame ids that were computed before replaying
   started.  Returns the new iterator, or NULL if TP has no trace.
   On error, the thread is left not-replaying.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detects steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Undo the partial replay setup before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
2010
2011/* Stop replaying a thread. */
2012
2013static void
2014record_btrace_stop_replaying (struct thread_info *tp)
2015{
2016 struct btrace_thread_info *btinfo;
2017
2018 btinfo = &tp->btrace;
2019
2020 xfree (btinfo->replay);
2021 btinfo->replay = NULL;
2022
2023 /* Make sure we're not leaving any stale registers. */
2024 registers_changed_ptid (tp->ptid);
2025}
2026
e3cfc1c7
MM
2027/* Stop replaying TP if it is at the end of its execution history. */
2028
2029static void
2030record_btrace_stop_replaying_at_end (struct thread_info *tp)
2031{
2032 struct btrace_insn_iterator *replay, end;
2033 struct btrace_thread_info *btinfo;
2034
2035 btinfo = &tp->btrace;
2036 replay = btinfo->replay;
2037
2038 if (replay == NULL)
2039 return;
2040
2041 btrace_insn_end (&end, btinfo);
2042
2043 if (btrace_insn_cmp (replay, &end) == 0)
2044 record_btrace_stop_replaying (tp);
2045}
2046
/* The to_resume method of target record-btrace.

   If nothing is replaying and we move forward, forward the request to
   the target beneath.  Otherwise record the move intent in each
   matching thread's btrace flags; the actual stepping happens in
   record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp;
  enum btrace_thread_flag flag, cflag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.

     If there is more than one to_resume call, we have to rely on infrun
     to not change the execution direction in-between.  */
  record_btrace_resume_exec_dir = execution_direction;

  /* As long as we're not replaying, just forward the request.

     For non-stop targets this means that no thread is replaying.  In order to
     make progress, we may need to explicitly move replaying threads to the end
     of their execution history.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_resume (ops, ptid, step, signal);
      return;
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (execution_direction == EXEC_REVERSE)
    {
      flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
      cflag = BTHR_RCONT;
    }
  else
    {
      flag = step == 0 ? BTHR_CONT : BTHR_STEP;
      cflag = BTHR_CONT;
    }

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.

     For all-stop targets, we only step INFERIOR_PTID and continue others.  */
  if (!target_is_non_stop_p ())
    {
      gdb_assert (ptid_match (inferior_ptid, ptid));

      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  {
	    if (ptid_match (tp->ptid, inferior_ptid))
	      record_btrace_resume_thread (tp, flag);
	    else
	      record_btrace_resume_thread (tp, cflag);
	  }
    }
  else
    {
      ALL_NON_EXITED_THREADS (tp)
	if (ptid_match (tp->ptid, ptid))
	  record_btrace_resume_thread (tp, flag);
    }

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
2122
/* Cancel resuming TP.

   Clears any pending move/stop request and stops replaying if TP is
   already at the end of its execution history.  */

static void
record_btrace_cancel_resume (struct thread_info *tp)
{
  enum btrace_thread_flag flags;

  flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
  if (flags == 0)
    return;

  DEBUG ("cancel resume thread %s (%s): %x (%s)",
	 print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
  record_btrace_stop_replaying_at_end (tp);
}
2142
2143/* Return a target_waitstatus indicating that we ran out of history. */
2144
2145static struct target_waitstatus
2146btrace_step_no_history (void)
2147{
2148 struct target_waitstatus status;
2149
2150 status.kind = TARGET_WAITKIND_NO_HISTORY;
2151
2152 return status;
2153}
2154
2155/* Return a target_waitstatus indicating that a step finished. */
2156
2157static struct target_waitstatus
2158btrace_step_stopped (void)
2159{
2160 struct target_waitstatus status;
2161
2162 status.kind = TARGET_WAITKIND_STOPPED;
2163 status.value.sig = GDB_SIGNAL_TRAP;
2164
2165 return status;
2166}
2167
6e4879f0
MM
2168/* Return a target_waitstatus indicating that a thread was stopped as
2169 requested. */
2170
2171static struct target_waitstatus
2172btrace_step_stopped_on_request (void)
2173{
2174 struct target_waitstatus status;
2175
2176 status.kind = TARGET_WAITKIND_STOPPED;
2177 status.value.sig = GDB_SIGNAL_0;
2178
2179 return status;
2180}
2181
d825d248
MM
2182/* Return a target_waitstatus indicating a spurious stop. */
2183
2184static struct target_waitstatus
2185btrace_step_spurious (void)
2186{
2187 struct target_waitstatus status;
2188
2189 status.kind = TARGET_WAITKIND_SPURIOUS;
2190
2191 return status;
2192}
2193
e3cfc1c7
MM
2194/* Return a target_waitstatus indicating that the thread was not resumed. */
2195
2196static struct target_waitstatus
2197btrace_step_no_resumed (void)
2198{
2199 struct target_waitstatus status;
2200
2201 status.kind = TARGET_WAITKIND_NO_RESUMED;
2202
2203 return status;
2204}
2205
2206/* Return a target_waitstatus indicating that we should wait again. */
2207
2208static struct target_waitstatus
2209btrace_step_again (void)
2210{
2211 struct target_waitstatus status;
2212
2213 status.kind = TARGET_WAITKIND_IGNORE;
2214
2215 return status;
2216}
2217
52834460
MM
2218/* Clear the record histories. */
2219
2220static void
2221record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2222{
2223 xfree (btinfo->insn_history);
2224 xfree (btinfo->call_history);
2225
2226 btinfo->insn_history = NULL;
2227 btinfo->call_history = NULL;
2228}
2229
3c615f99
MM
2230/* Check whether TP's current replay position is at a breakpoint. */
2231
2232static int
2233record_btrace_replay_at_breakpoint (struct thread_info *tp)
2234{
2235 struct btrace_insn_iterator *replay;
2236 struct btrace_thread_info *btinfo;
2237 const struct btrace_insn *insn;
2238 struct inferior *inf;
2239
2240 btinfo = &tp->btrace;
2241 replay = btinfo->replay;
2242
2243 if (replay == NULL)
2244 return 0;
2245
2246 insn = btrace_insn_get (replay);
2247 if (insn == NULL)
2248 return 0;
2249
2250 inf = find_inferior_ptid (tp->ptid);
2251 if (inf == NULL)
2252 return 0;
2253
2254 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2255 &btinfo->stop_reason);
2256}
2257
/* Step one instruction in forward direction.

   Returns TARGET_WAITKIND_NO_HISTORY if TP is not replaying or reached the
   end of its execution history, TARGET_WAITKIND_STOPPED if it stepped onto
   a breakpoint, and TARGET_WAITKIND_SPURIOUS after a successful step.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Check if we're stepping a breakpoint.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  /* Skip gaps during replay.  A gap yields a NULL instruction, so keep
     advancing until we land on a real instruction.  */
  do
    {
      unsigned int steps;

      /* We will bail out here if we continue stepping after reaching the end
	 of the execution history.  */
      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* The execution trace contains (and ends with) the current instruction.
     This instruction has not been executed, yet, so the trace really ends
     one instruction earlier.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    return btrace_step_no_history ();

  return btrace_step_spurious ();
}
2301
/* Step one instruction in backward direction.

   Starts replaying if TP was not replaying yet.  Returns
   TARGET_WAITKIND_NO_HISTORY at the beginning of the history,
   TARGET_WAITKIND_STOPPED when the de-executed instruction is at a
   breakpoint, and TARGET_WAITKIND_SPURIOUS otherwise.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  /* Check if we're stepping a breakpoint.

     For reverse-stepping, this check is after the step.  There is logic in
     infrun.c that handles reverse-stepping separately.  See, for example,
     proceed and adjust_pc_after_break.

     This code assumes that for reverse-stepping, PC points to the last
     de-executed instruction, whereas for forward-stepping PC points to the
     next to-be-executed instruction.  */
  if (record_btrace_replay_at_breakpoint (tp))
    return btrace_step_stopped ();

  return btrace_step_spurious ();
}
2343
/* Step a single thread.

   Consumes TP's pending BTHR_MOVE/BTHR_STOP request, performs one step of
   it, and returns the resulting waitstatus.  TARGET_WAITKIND_IGNORE means
   the caller should step TP again (continue in progress);
   TARGET_WAITKIND_NO_HISTORY re-arms the request so the thread keeps
   "moving" until to_wait decides to report it.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the request; it is re-added below where stepping continues.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      /* A single forward step; a spurious result breaks out of the switch
	 (see the NO_HISTORY handling below), anything else means the step
	 completed and we report a stop.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Continuing: step once, and if nothing happened, re-arm the request
	 and ask to be called again.  */
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();

    case BTHR_RCONT:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	break;

      btinfo->flags |= flags;
      return btrace_step_again ();
    }

  /* We keep threads moving at the end of their execution history.  The to_wait
     method will stop the thread for whom the event is reported.  */
  if (status.kind == TARGET_WAITKIND_NO_HISTORY)
    btinfo->flags |= flags;

  return status;
}
2412
e3cfc1c7
MM
/* A vector of threads.  TP_T is the pointer element type required by the
   VEC machinery (see vec.h).  */

typedef struct thread_info * tp_t;
DEF_VEC_P (tp_t);
2417
a6b5be76
MM
2418/* Announce further events if necessary. */
2419
2420static void
2421record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2422 const VEC (tp_t) *no_history)
2423{
2424 int more_moving, more_no_history;
2425
2426 more_moving = !VEC_empty (tp_t, moving);
2427 more_no_history = !VEC_empty (tp_t, no_history);
2428
2429 if (!more_moving && !more_no_history)
2430 return;
2431
2432 if (more_moving)
2433 DEBUG ("movers pending");
2434
2435 if (more_no_history)
2436 DEBUG ("no-history pending");
2437
2438 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2439}
2440
b2f4cfde
MM
/* The to_wait method of target record-btrace.

   When not replaying, forwards to the target beneath.  Otherwise steps all
   matching moving threads round-robin until one of them reports an event,
   and returns that thread's ptid with *STATUS filled in.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  VEC (tp_t) *moving, *no_history;
  struct thread_info *tp, *eventing;
  struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if ((execution_direction != EXEC_REVERSE)
      && !record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  moving = NULL;
  no_history = NULL;

  /* Both vectors are freed on any exit path below.  */
  make_cleanup (VEC_cleanup (tp_t), &moving);
  make_cleanup (VEC_cleanup (tp_t), &no_history);

  /* Keep a work list of moving threads.  */
  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid)
	&& ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
      VEC_safe_push (tp_t, moving, tp);

  if (VEC_empty (tp_t, moving))
    {
      *status = btrace_step_no_resumed ();

      DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
	     target_waitstatus_to_string (status));

      do_cleanups (cleanups);
      return null_ptid;
    }

  /* Step moving threads one by one, one step each, until either one thread
     reports an event or we run out of threads to step.

     When stepping more than one thread, chances are that some threads reach
     the end of their execution history earlier than others.  If we reported
     this immediately, all-stop on top of non-stop would stop all threads and
     resume the same threads next time.  And we would report the same thread
     having reached the end of its execution history again.

     In the worst case, this would starve the other threads.  But even if other
     threads would be allowed to make progress, this would result in far too
     many intermediate stops.

     We therefore delay the reporting of "no execution history" until we have
     nothing else to report.  By this time, all threads should have moved to
     either the beginning or the end of their execution history.  There will
     be a single user-visible stop.  */
  eventing = NULL;
  while ((eventing == NULL) && !VEC_empty (tp_t, moving))
    {
      unsigned int ix;

      ix = 0;
      while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
	{
	  *status = record_btrace_step_thread (tp);

	  switch (status->kind)
	    {
	    case TARGET_WAITKIND_IGNORE:
	      /* Thread still moving; try it again on the next round.  */
	      ix++;
	      break;

	    case TARGET_WAITKIND_NO_HISTORY:
	      /* Park the thread on the no-history list; reported only if
		 nothing else comes up.  */
	      VEC_safe_push (tp_t, no_history,
			     VEC_ordered_remove (tp_t, moving, ix));
	      break;

	    default:
	      /* A real event; this thread is the one we report.  */
	      eventing = VEC_unordered_remove (tp_t, moving, ix);
	      break;
	    }
	}
    }

  if (eventing == NULL)
    {
      /* We started with at least one moving thread.  This thread must have
	 either stopped or reached the end of its execution history.

	 In the former case, EVENTING must not be NULL.
	 In the latter case, NO_HISTORY must not be empty.  */
      gdb_assert (!VEC_empty (tp_t, no_history));

      /* We kept threads moving at the end of their execution history.  Stop
	 EVENTING now that we are going to report its stop.  */
      eventing = VEC_unordered_remove (tp_t, no_history, 0);
      eventing->btrace.flags &= ~BTHR_MOVE;

      *status = btrace_step_no_history ();
    }

  gdb_assert (eventing != NULL);

  /* We kept threads replaying at the end of their execution history.  Stop
     replaying EVENTING now that we are going to report its stop.  */
  record_btrace_stop_replaying_at_end (eventing);

  /* Stop all other threads. */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (tp)
      record_btrace_cancel_resume (tp);

  /* In async mode, we need to announce further events.  */
  if (target_is_async_p ())
    record_btrace_maybe_mark_async_event (moving, no_history);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&eventing->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (eventing->ptid);

  DEBUG ("wait ended by thread %s (%s): %s",
	 print_thread_id (eventing),
	 target_pid_to_str (eventing->ptid),
	 target_waitstatus_to_string (status));

  do_cleanups (cleanups);
  return eventing->ptid;
}
2575
6e4879f0
MM
2576/* The to_stop method of target record-btrace. */
2577
2578static void
2579record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2580{
2581 DEBUG ("stop %s", target_pid_to_str (ptid));
2582
2583 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2584 if ((execution_direction != EXEC_REVERSE)
2585 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2586 {
2587 ops = ops->beneath;
2588 ops->to_stop (ops, ptid);
2589 }
2590 else
2591 {
2592 struct thread_info *tp;
2593
2594 ALL_NON_EXITED_THREADS (tp)
2595 if (ptid_match (tp->ptid, ptid))
2596 {
2597 tp->btrace.flags &= ~BTHR_MOVE;
2598 tp->btrace.flags |= BTHR_STOP;
2599 }
2600 }
2601 }
2602
52834460
MM
/* The to_can_execute_reverse method of target record-btrace.
   Replaying recorded execution always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2610
9e8915c6 2611/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2612
9e8915c6
PA
2613static int
2614record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2615{
a52eab48 2616 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2617 {
2618 struct thread_info *tp = inferior_thread ();
2619
2620 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2621 }
2622
2623 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2624}
2625
2626/* The to_supports_stopped_by_sw_breakpoint method of target
2627 record-btrace. */
2628
2629static int
2630record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2631{
a52eab48 2632 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2633 return 1;
2634
2635 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2636}
2637
/* The to_stopped_by_hw_breakpoint method of target record-btrace.
   (The original comment said "sw" — copy-paste; this is the hw variant.)
   During replay, answer from the recorded stop reason of the current
   thread; otherwise defer to the target beneath.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops, minus_one_ptid))
    {
      struct thread_info *tp = inferior_thread ();

      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2652
2653/* The to_supports_stopped_by_hw_breakpoint method of target
2654 record-btrace. */
2655
2656static int
2657record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2658{
a52eab48 2659 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2660 return 1;
52834460 2661
9e8915c6 2662 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2663}
2664
e8032dde 2665/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2666
2667static void
e8032dde 2668record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2669{
e8032dde 2670 /* We don't add or remove threads during replay. */
a52eab48 2671 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2672 return;
2673
2674 /* Forward the request. */
e75fdfca 2675 ops = ops->beneath;
e8032dde 2676 ops->to_update_thread_list (ops);
e2887aa3
MM
2677}
2678
2679/* The to_thread_alive method of target record-btrace. */
2680
2681static int
2682record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2683{
2684 /* We don't add or remove threads during replay. */
a52eab48 2685 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2686 return find_thread_ptid (ptid) != NULL;
2687
2688 /* Forward the request. */
e75fdfca
TT
2689 ops = ops->beneath;
2690 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2691}
2692
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Moving the replay position invalidates the register cache and the cached
   record histories, and prints the new frame like a normal stop would.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;  /* Already at IT; nothing to do.  */

      *btinfo->replay = *it;

      /* The replay position moved, so cached register values are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2723
2724/* The to_goto_record_begin method of target record-btrace. */
2725
2726static void
08475817 2727record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2728{
2729 struct thread_info *tp;
2730 struct btrace_insn_iterator begin;
2731
2732 tp = require_btrace_thread ();
2733
2734 btrace_insn_begin (&begin, &tp->btrace);
2735 record_btrace_set_replay (tp, &begin);
066ce621
MM
2736}
2737
2738/* The to_goto_record_end method of target record-btrace. */
2739
2740static void
307a1b91 2741record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2742{
2743 struct thread_info *tp;
2744
2745 tp = require_btrace_thread ();
2746
2747 record_btrace_set_replay (tp, NULL);
066ce621
MM
2748}
2749
2750/* The to_goto_record method of target record-btrace. */
2751
2752static void
606183ac 2753record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2754{
2755 struct thread_info *tp;
2756 struct btrace_insn_iterator it;
2757 unsigned int number;
2758 int found;
2759
2760 number = insn;
2761
2762 /* Check for wrap-arounds. */
2763 if (number != insn)
2764 error (_("Instruction number out of range."));
2765
2766 tp = require_btrace_thread ();
2767
2768 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2769 if (found == 0)
2770 error (_("No such instruction."));
2771
2772 record_btrace_set_replay (tp, &it);
066ce621
MM
2773}
2774
797094dd
MM
2775/* The to_record_stop_replaying method of target record-btrace. */
2776
2777static void
2778record_btrace_stop_replaying_all (struct target_ops *self)
2779{
2780 struct thread_info *tp;
2781
2782 ALL_NON_EXITED_THREADS (tp)
2783 record_btrace_stop_replaying (tp);
2784}
2785
70ad5bff
MM
2786/* The to_execution_direction target method. */
2787
2788static enum exec_direction_kind
2789record_btrace_execution_direction (struct target_ops *self)
2790{
2791 return record_btrace_resume_exec_dir;
2792}
2793
aef92902
MM
2794/* The to_prepare_to_generate_core target method. */
2795
2796static void
2797record_btrace_prepare_to_generate_core (struct target_ops *self)
2798{
2799 record_btrace_generating_corefile = 1;
2800}
2801
2802/* The to_done_generating_core target method. */
2803
2804static void
2805record_btrace_done_generating_core (struct target_ops *self)
2806{
2807 record_btrace_generating_corefile = 0;
2808}
2809
afedecd3
MM
/* Initialize the record-btrace target ops.

   Fills in the record_btrace_ops target vector; methods that are not set
   here fall through to the target beneath via the usual delegation.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Record/history inspection methods.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_record_will_replay = record_btrace_will_replay;
  ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
  /* Memory and register access during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2868
f4abbc16
MM
/* Start recording in BTS format.  Implements "record btrace bts".  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Reset the configured format so a failed attempt does not leak into
	 a later "record btrace" invocation.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2890
/* Start recording in Intel Processor Trace format.  Implements
   "record btrace pt".  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Reset the configured format so a failed attempt does not leak into
	 a later "record btrace" invocation.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2912
b20a6524
MM
/* Alias for "target record".  Tries Intel Processor Trace first and falls
   back to BTS if PT is not available.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT failed; retry with BTS.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* Neither format worked; reset the format and re-throw the BTS
	     failure.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2944
67b5c0c1
MM
2945/* The "set record btrace" command. */
2946
2947static void
2948cmd_set_record_btrace (char *args, int from_tty)
2949{
2950 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2951}
2952
2953/* The "show record btrace" command. */
2954
2955static void
2956cmd_show_record_btrace (char *args, int from_tty)
2957{
2958 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2959}
2960
/* The "show record btrace replay-memory-access" command.

   NOTE(review): this prints to gdb_stdout rather than to the FILE
   parameter supplied by the set/show machinery — confirm this is
   intentional before changing it.  */

static void
cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
			       struct cmd_list_element *c, const char *value)
{
  fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
		    replay_memory_access);
}
2970
d33501a5
MM
2971/* The "set record btrace bts" command. */
2972
2973static void
2974cmd_set_record_btrace_bts (char *args, int from_tty)
2975{
2976 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 2977 "by an appropriate subcommand.\n"));
d33501a5
MM
2978 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2979 all_commands, gdb_stdout);
2980}
2981
2982/* The "show record btrace bts" command. */
2983
2984static void
2985cmd_show_record_btrace_bts (char *args, int from_tty)
2986{
2987 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2988}
2989
b20a6524
MM
2990/* The "set record btrace pt" command. */
2991
2992static void
2993cmd_set_record_btrace_pt (char *args, int from_tty)
2994{
2995 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2996 "by an appropriate subcommand.\n"));
2997 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2998 all_commands, gdb_stdout);
2999}
3000
3001/* The "show record btrace pt" command. */
3002
3003static void
3004cmd_show_record_btrace_pt (char *args, int from_tty)
3005{
3006 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3007}
3008
/* The "record bts buffer-size" show value function.  FROM_TTY and C are
   unused; VALUE is the pre-rendered setting.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
3019
/* The "record pt buffer-size" show value function.  FROM_TTY and C are
   unused; VALUE is the pre-rendered setting.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
3030
afedecd3
MM
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Registers the "record btrace" family of commands, the set/show option
   trees, the target itself, the frame cache, and the default trace buffer
   sizes.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" and formats, plus short aliases.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" option trees.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Per-function frame cache used by the btrace unwinder.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default buffer sizes; adjustable via the buffer-size commands
     registered above.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}