1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40 #include "vec.h"
41
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
44
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
47
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
51 static const char *const replay_memory_access_types[] =
52 {
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56 };
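/* Editor's note: the NULL terminator above is required because the array is
   passed as an enum list to the set/show machinery.  A minimal sketch of the
   registration, assuming it happens in this file's _initialize routine (the
   actual call may differ in its doc strings and callbacks):  */
#if 0
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access,
			_("Set what memory accesses are allowed during replay."),
			_("Show what memory accesses are allowed during replay."),
			NULL, NULL, NULL,
			&set_record_btrace_cmdlist,
			&show_record_btrace_cmdlist);
#endif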
57
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access = replay_memory_access_read_only;
60
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
64
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile;
73
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf;
76
77 /* Command list for "record btrace". */
78 static struct cmd_list_element *record_btrace_cmdlist;
79
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91 #define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
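/* Editor's illustration of the ambiguity mentioned above: without the
   do ... while (0) wrapper the macro would expand to a bare if statement,
   and in

     if (cond)
       DEBUG ("resuming");
     else
       do_other_thing ();

   the else would bind to the if inside the expansion.  The wrapper turns
   the expansion into a single statement that consumes the trailing
   semicolon, so the else binds to the outer if as written.  */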
99
100
101 /* Update the branch trace for the current thread and return a pointer to its
102 thread_info.
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
107 static struct thread_info *
108 require_btrace_thread (void)
109 {
110 struct thread_info *tp;
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
120 if (btrace_is_empty (tp))
121 error (_("No trace."));
122
123 return tp;
124 }
125
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132 static struct btrace_thread_info *
133 require_btrace (void)
134 {
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
140 }
141
142 /* Enable branch tracing for one thread. Warn on errors. */
143
144 static void
145 record_btrace_enable_warn (struct thread_info *tp)
146 {
147 TRY
148 {
149 btrace_enable (tp, &record_btrace_conf);
150 }
151 CATCH (error, RETURN_MASK_ERROR)
152 {
153 warning ("%s", error.message);
154 }
155 END_CATCH
156 }
157
158 /* Callback function to disable branch tracing for one thread. */
159
160 static void
161 record_btrace_disable_callback (void *arg)
162 {
163 struct thread_info *tp;
164
165 tp = arg;
166
167 btrace_disable (tp);
168 }
169
170 /* Enable automatic tracing of new threads. */
171
172 static void
173 record_btrace_auto_enable (void)
174 {
175 DEBUG ("attach thread observer");
176
177 record_btrace_thread_observer
178 = observer_attach_new_thread (record_btrace_enable_warn);
179 }
180
181 /* Disable automatic tracing of new threads. */
182
183 static void
184 record_btrace_auto_disable (void)
185 {
186 /* The observer may already have been detached. */
187 if (record_btrace_thread_observer == NULL)
188 return;
189
190 DEBUG ("detach thread observer");
191
192 observer_detach_new_thread (record_btrace_thread_observer);
193 record_btrace_thread_observer = NULL;
194 }
195
196 /* The record-btrace async event handler function. */
197
198 static void
199 record_btrace_handle_async_inferior_event (gdb_client_data data)
200 {
201 inferior_event_handler (INF_REG_EVENT, NULL);
202 }
203
204 /* The to_open method of target record-btrace. */
205
206 static void
207 record_btrace_open (const char *args, int from_tty)
208 {
209 struct cleanup *disable_chain;
210 struct thread_info *tp;
211
212 DEBUG ("open");
213
214 record_preopen ();
215
216 if (!target_has_execution)
217 error (_("The program is not being run."));
218
219 gdb_assert (record_btrace_thread_observer == NULL);
220
221 disable_chain = make_cleanup (null_cleanup, NULL);
222 ALL_NON_EXITED_THREADS (tp)
223 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
224 {
225 btrace_enable (tp, &record_btrace_conf);
226
227 make_cleanup (record_btrace_disable_callback, tp);
228 }
229
230 record_btrace_auto_enable ();
231
232 push_target (&record_btrace_ops);
233
234 record_btrace_async_inferior_event_handler
235 = create_async_event_handler (record_btrace_handle_async_inferior_event,
236 NULL);
237 record_btrace_generating_corefile = 0;
238
239 observer_notify_record_changed (current_inferior (), 1);
240
241 discard_cleanups (disable_chain);
242 }
243
244 /* The to_stop_recording method of target record-btrace. */
245
246 static void
247 record_btrace_stop_recording (struct target_ops *self)
248 {
249 struct thread_info *tp;
250
251 DEBUG ("stop recording");
252
253 record_btrace_auto_disable ();
254
255 ALL_NON_EXITED_THREADS (tp)
256 if (tp->btrace.target != NULL)
257 btrace_disable (tp);
258 }
259
260 /* The to_close method of target record-btrace. */
261
262 static void
263 record_btrace_close (struct target_ops *self)
264 {
265 struct thread_info *tp;
266
267 if (record_btrace_async_inferior_event_handler != NULL)
268 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
269
270 /* Make sure automatic recording gets disabled even if we did not stop
271 recording before closing the record-btrace target. */
272 record_btrace_auto_disable ();
273
274 /* We should have already stopped recording.
275 Tear down btrace in case we have not. */
276 ALL_NON_EXITED_THREADS (tp)
277 btrace_teardown (tp);
278 }
279
280 /* The to_async method of target record-btrace. */
281
282 static void
283 record_btrace_async (struct target_ops *ops, int enable)
284 {
285 if (enable)
286 mark_async_event_handler (record_btrace_async_inferior_event_handler);
287 else
288 clear_async_event_handler (record_btrace_async_inferior_event_handler);
289
290 ops->beneath->to_async (ops->beneath, enable);
291 }
292
293 /* Adjust the size and return a human-readable size suffix. */
294
295 static const char *
296 record_btrace_adjust_size (unsigned int *size)
297 {
298 unsigned int sz;
299
300 sz = *size;
301
302 if ((sz & ((1u << 30) - 1)) == 0)
303 {
304 *size = sz >> 30;
305 return "GB";
306 }
307 else if ((sz & ((1u << 20) - 1)) == 0)
308 {
309 *size = sz >> 20;
310 return "MB";
311 }
312 else if ((sz & ((1u << 10) - 1)) == 0)
313 {
314 *size = sz >> 10;
315 return "kB";
316 }
317 else
318 return "";
319 }
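/* Editor's example (not part of GDB): only exact multiples of a
   power-of-two unit are scaled; anything else falls through and is
   printed in bytes.  */
#if 0
  unsigned int size = 2u << 20;	/* 2097152 bytes.  */
  const char *suffix = record_btrace_adjust_size (&size);
  /* Now size == 2 and suffix == "MB".  A value of 2097153 would be left
     unchanged, with an empty suffix.  */
#endif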
320
321 /* Print a BTS configuration. */
322
323 static void
324 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
325 {
326 const char *suffix;
327 unsigned int size;
328
329 size = conf->size;
330 if (size > 0)
331 {
332 suffix = record_btrace_adjust_size (&size);
333 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
334 }
335 }
336
337 /* Print an Intel(R) Processor Trace configuration. */
338
339 static void
340 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
341 {
342 const char *suffix;
343 unsigned int size;
344
345 size = conf->size;
346 if (size > 0)
347 {
348 suffix = record_btrace_adjust_size (&size);
349 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
350 }
351 }
352
353 /* Print a branch tracing configuration. */
354
355 static void
356 record_btrace_print_conf (const struct btrace_config *conf)
357 {
358 printf_unfiltered (_("Recording format: %s.\n"),
359 btrace_format_string (conf->format));
360
361 switch (conf->format)
362 {
363 case BTRACE_FORMAT_NONE:
364 return;
365
366 case BTRACE_FORMAT_BTS:
367 record_btrace_print_bts_conf (&conf->bts);
368 return;
369
370 case BTRACE_FORMAT_PT:
371 record_btrace_print_pt_conf (&conf->pt);
372 return;
373 }
374
375 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
376 }
377
378 /* The to_info_record method of target record-btrace. */
379
380 static void
381 record_btrace_info (struct target_ops *self)
382 {
383 struct btrace_thread_info *btinfo;
384 const struct btrace_config *conf;
385 struct thread_info *tp;
386 unsigned int insns, calls, gaps;
387
388 DEBUG ("info");
389
390 tp = find_thread_ptid (inferior_ptid);
391 if (tp == NULL)
392 error (_("No thread."));
393
394 btinfo = &tp->btrace;
395
396 conf = btrace_conf (btinfo);
397 if (conf != NULL)
398 record_btrace_print_conf (conf);
399
400 btrace_fetch (tp);
401
402 insns = 0;
403 calls = 0;
404 gaps = 0;
405
406 if (!btrace_is_empty (tp))
407 {
408 struct btrace_call_iterator call;
409 struct btrace_insn_iterator insn;
410
411 btrace_call_end (&call, btinfo);
412 btrace_call_prev (&call, 1);
413 calls = btrace_call_number (&call);
414
415 btrace_insn_end (&insn, btinfo);
416
417 insns = btrace_insn_number (&insn);
418 if (insns != 0)
419 {
420 /* The last instruction does not really belong to the trace. */
421 insns -= 1;
422 }
423 else
424 {
425 unsigned int steps;
426
427 /* Skip gaps at the end. */
428 do
429 {
430 steps = btrace_insn_prev (&insn, 1);
431 if (steps == 0)
432 break;
433
434 insns = btrace_insn_number (&insn);
435 }
436 while (insns == 0);
437 }
438
439 gaps = btinfo->ngaps;
440 }
441
442 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
443 "for thread %d (%s).\n"), insns, calls, gaps,
444 tp->num, target_pid_to_str (tp->ptid));
445
446 if (btrace_is_replaying (tp))
447 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
448 btrace_insn_number (btinfo->replay));
449 }
450
451 /* Print a decode error. */
452
453 static void
454 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
455 enum btrace_format format)
456 {
457 const char *errstr;
458 int is_error;
459
460 errstr = _("unknown");
461 is_error = 1;
462
463 switch (format)
464 {
465 default:
466 break;
467
468 case BTRACE_FORMAT_BTS:
469 switch (errcode)
470 {
471 default:
472 break;
473
474 case BDE_BTS_OVERFLOW:
475 errstr = _("instruction overflow");
476 break;
477
478 case BDE_BTS_INSN_SIZE:
479 errstr = _("unknown instruction");
480 break;
481 }
482 break;
483
484 #if defined (HAVE_LIBIPT)
485 case BTRACE_FORMAT_PT:
486 switch (errcode)
487 {
488 case BDE_PT_USER_QUIT:
489 is_error = 0;
490 errstr = _("trace decode cancelled");
491 break;
492
493 case BDE_PT_DISABLED:
494 is_error = 0;
495 errstr = _("disabled");
496 break;
497
498 case BDE_PT_OVERFLOW:
499 is_error = 0;
500 errstr = _("overflow");
501 break;
502
503 default:
504 if (errcode < 0)
505 errstr = pt_errstr (pt_errcode (errcode));
506 break;
507 }
508 break;
509 #endif /* defined (HAVE_LIBIPT) */
510 }
511
512 ui_out_text (uiout, _("["));
513 if (is_error)
514 {
515 ui_out_text (uiout, _("decode error ("));
516 ui_out_field_int (uiout, "errcode", errcode);
517 ui_out_text (uiout, _("): "));
518 }
519 ui_out_text (uiout, errstr);
520 ui_out_text (uiout, _("]\n"));
521 }
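/* For example, a BTS instruction overflow is rendered as

     [decode error (<errcode>): instruction overflow]

   whereas a non-error event such as BDE_PT_DISABLED is rendered simply as
   "[disabled]", without the numeric error code.  */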
522
523 /* Print an unsigned int. */
524
525 static void
526 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
527 {
528 ui_out_field_fmt (uiout, fld, "%u", val);
529 }
530
531 /* Disassemble a section of the recorded instruction trace. */
532
533 static void
534 btrace_insn_history (struct ui_out *uiout,
535 const struct btrace_thread_info *btinfo,
536 const struct btrace_insn_iterator *begin,
537 const struct btrace_insn_iterator *end, int flags)
538 {
539 struct gdbarch *gdbarch;
540 struct btrace_insn_iterator it;
541
542 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
543 btrace_insn_number (end));
544
545 gdbarch = target_gdbarch ();
546
547 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
548 {
549 const struct btrace_insn *insn;
550
551 insn = btrace_insn_get (&it);
552
553 /* A NULL instruction indicates a gap in the trace. */
554 if (insn == NULL)
555 {
556 const struct btrace_config *conf;
557
558 conf = btrace_conf (btinfo);
559
560 /* We have trace so we must have a configuration. */
561 gdb_assert (conf != NULL);
562
563 btrace_ui_out_decode_error (uiout, it.function->errcode,
564 conf->format);
565 }
566 else
567 {
568 char prefix[4];
569
570 /* We may add a speculation prefix later. We use the same space
571 that is used for the pc prefix. */
572 if ((flags & DISASSEMBLY_OMIT_PC) == 0)
573 strncpy (prefix, pc_prefix (insn->pc), 3);
574 else
575 {
576 prefix[0] = ' ';
577 prefix[1] = ' ';
578 prefix[2] = ' ';
579 }
580 prefix[3] = 0;
581
582 /* Print the instruction index. */
583 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
584 ui_out_text (uiout, "\t");
585
586 /* Indicate speculative execution by a leading '?'. */
587 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
588 prefix[0] = '?';
589
590 /* Print the prefix; we tell gdb_disassembly below to omit it. */
591 ui_out_field_fmt (uiout, "prefix", "%s", prefix);
592
593 /* Disassembly with '/m' flag may not produce the expected result.
594 See PR gdb/11833. */
595 gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
596 1, insn->pc, insn->pc + 1);
597 }
598 }
599 }
600
601 /* The to_insn_history method of target record-btrace. */
602
603 static void
604 record_btrace_insn_history (struct target_ops *self, int size, int flags)
605 {
606 struct btrace_thread_info *btinfo;
607 struct btrace_insn_history *history;
608 struct btrace_insn_iterator begin, end;
609 struct cleanup *uiout_cleanup;
610 struct ui_out *uiout;
611 unsigned int context, covered;
612
613 uiout = current_uiout;
614 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
615 "insn history");
616 context = abs (size);
617 if (context == 0)
618 error (_("Bad record instruction-history-size."));
619
620 btinfo = require_btrace ();
621 history = btinfo->insn_history;
622 if (history == NULL)
623 {
624 struct btrace_insn_iterator *replay;
625
626 DEBUG ("insn-history (0x%x): %d", flags, size);
627
628 /* If we're replaying, we start at the replay position. Otherwise, we
629 start at the tail of the trace. */
630 replay = btinfo->replay;
631 if (replay != NULL)
632 begin = *replay;
633 else
634 btrace_insn_end (&begin, btinfo);
635
636 /* We start from here and expand in the requested direction. Then we
637 expand in the other direction, as well, to fill up any remaining
638 context. */
639 end = begin;
640 if (size < 0)
641 {
642 /* We want the current position covered, as well. */
643 covered = btrace_insn_next (&end, 1);
644 covered += btrace_insn_prev (&begin, context - covered);
645 covered += btrace_insn_next (&end, context - covered);
646 }
647 else
648 {
649 covered = btrace_insn_next (&end, context);
650 covered += btrace_insn_prev (&begin, context - covered);
651 }
652 }
653 else
654 {
655 begin = history->begin;
656 end = history->end;
657
658 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
659 btrace_insn_number (&begin), btrace_insn_number (&end));
660
661 if (size < 0)
662 {
663 end = begin;
664 covered = btrace_insn_prev (&begin, context);
665 }
666 else
667 {
668 begin = end;
669 covered = btrace_insn_next (&end, context);
670 }
671 }
672
673 if (covered > 0)
674 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
675 else
676 {
677 if (size < 0)
678 printf_unfiltered (_("At the start of the branch trace record.\n"));
679 else
680 printf_unfiltered (_("At the end of the branch trace record.\n"));
681 }
682
683 btrace_set_insn_history (btinfo, &begin, &end);
684 do_cleanups (uiout_cleanup);
685 }
686
687 /* The to_insn_history_range method of target record-btrace. */
688
689 static void
690 record_btrace_insn_history_range (struct target_ops *self,
691 ULONGEST from, ULONGEST to, int flags)
692 {
693 struct btrace_thread_info *btinfo;
694 struct btrace_insn_history *history;
695 struct btrace_insn_iterator begin, end;
696 struct cleanup *uiout_cleanup;
697 struct ui_out *uiout;
698 unsigned int low, high;
699 int found;
700
701 uiout = current_uiout;
702 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
703 "insn history");
704 low = from;
705 high = to;
706
707 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
708
709 /* Check for wrap-arounds. */
710 if (low != from || high != to)
711 error (_("Bad range."));
712
713 if (high < low)
714 error (_("Bad range."));
715
716 btinfo = require_btrace ();
717
718 found = btrace_find_insn_by_number (&begin, btinfo, low);
719 if (found == 0)
720 error (_("Range out of bounds."));
721
722 found = btrace_find_insn_by_number (&end, btinfo, high);
723 if (found == 0)
724 {
725 /* Silently truncate the range. */
726 btrace_insn_end (&end, btinfo);
727 }
728 else
729 {
730 /* We want both begin and end to be inclusive. */
731 btrace_insn_next (&end, 1);
732 }
733
734 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
735 btrace_set_insn_history (btinfo, &begin, &end);
736
737 do_cleanups (uiout_cleanup);
738 }
739
740 /* The to_insn_history_from method of target record-btrace. */
741
742 static void
743 record_btrace_insn_history_from (struct target_ops *self,
744 ULONGEST from, int size, int flags)
745 {
746 ULONGEST begin, end, context;
747
748 context = abs (size);
749 if (context == 0)
750 error (_("Bad record instruction-history-size."));
751
752 if (size < 0)
753 {
754 end = from;
755
756 if (from < context)
757 begin = 0;
758 else
759 begin = from - context + 1;
760 }
761 else
762 {
763 begin = from;
764 end = from + context - 1;
765
766 /* Check for wrap-around. */
767 if (end < begin)
768 end = ULONGEST_MAX;
769 }
770
771 record_btrace_insn_history_range (self, begin, end, flags);
772 }
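/* Editor's worked example: with FROM = 10 and SIZE = -5, the range is
   END = 10 and BEGIN = 10 - 5 + 1 = 6, i.e. the five instructions 6..10
   ending at (and including) FROM.  With SIZE = 5 it is 10..14, and a
   range that would overflow is clamped to ULONGEST_MAX.  */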
773
774 /* Print the instruction number range for a function call history line. */
775
776 static void
777 btrace_call_history_insn_range (struct ui_out *uiout,
778 const struct btrace_function *bfun)
779 {
780 unsigned int begin, end, size;
781
782 size = VEC_length (btrace_insn_s, bfun->insn);
783 gdb_assert (size > 0);
784
785 begin = bfun->insn_offset;
786 end = begin + size - 1;
787
788 ui_out_field_uint (uiout, "insn begin", begin);
789 ui_out_text (uiout, ",");
790 ui_out_field_uint (uiout, "insn end", end);
791 }
792
793 /* Compute the lowest and highest source line for the instructions in BFUN
794 and return them in PBEGIN and PEND.
795 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
796 result from inlining or macro expansion. */
797
798 static void
799 btrace_compute_src_line_range (const struct btrace_function *bfun,
800 int *pbegin, int *pend)
801 {
802 struct btrace_insn *insn;
803 struct symtab *symtab;
804 struct symbol *sym;
805 unsigned int idx;
806 int begin, end;
807
808 begin = INT_MAX;
809 end = INT_MIN;
810
811 sym = bfun->sym;
812 if (sym == NULL)
813 goto out;
814
815 symtab = symbol_symtab (sym);
816
817 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
818 {
819 struct symtab_and_line sal;
820
821 sal = find_pc_line (insn->pc, 0);
822 if (sal.symtab != symtab || sal.line == 0)
823 continue;
824
825 begin = min (begin, sal.line);
826 end = max (end, sal.line);
827 }
828
829 out:
830 *pbegin = begin;
831 *pend = end;
832 }
833
834 /* Print the source line information for a function call history line. */
835
836 static void
837 btrace_call_history_src_line (struct ui_out *uiout,
838 const struct btrace_function *bfun)
839 {
840 struct symbol *sym;
841 int begin, end;
842
843 sym = bfun->sym;
844 if (sym == NULL)
845 return;
846
847 ui_out_field_string (uiout, "file",
848 symtab_to_filename_for_display (symbol_symtab (sym)));
849
850 btrace_compute_src_line_range (bfun, &begin, &end);
851 if (end < begin)
852 return;
853
854 ui_out_text (uiout, ":");
855 ui_out_field_int (uiout, "min line", begin);
856
857 if (end == begin)
858 return;
859
860 ui_out_text (uiout, ",");
861 ui_out_field_int (uiout, "max line", end);
862 }
863
864 /* Get the name of a branch trace function. */
865
866 static const char *
867 btrace_get_bfun_name (const struct btrace_function *bfun)
868 {
869 struct minimal_symbol *msym;
870 struct symbol *sym;
871
872 if (bfun == NULL)
873 return "??";
874
875 msym = bfun->msym;
876 sym = bfun->sym;
877
878 if (sym != NULL)
879 return SYMBOL_PRINT_NAME (sym);
880 else if (msym != NULL)
881 return MSYMBOL_PRINT_NAME (msym);
882 else
883 return "??";
884 }
885
886 /* Disassemble a section of the recorded function trace. */
887
888 static void
889 btrace_call_history (struct ui_out *uiout,
890 const struct btrace_thread_info *btinfo,
891 const struct btrace_call_iterator *begin,
892 const struct btrace_call_iterator *end,
893 enum record_print_flag flags)
894 {
895 struct btrace_call_iterator it;
896
897 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
898 btrace_call_number (end));
899
900 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
901 {
902 const struct btrace_function *bfun;
903 struct minimal_symbol *msym;
904 struct symbol *sym;
905
906 bfun = btrace_call_get (&it);
907 sym = bfun->sym;
908 msym = bfun->msym;
909
910 /* Print the function index. */
911 ui_out_field_uint (uiout, "index", bfun->number);
912 ui_out_text (uiout, "\t");
913
914 /* Indicate gaps in the trace. */
915 if (bfun->errcode != 0)
916 {
917 const struct btrace_config *conf;
918
919 conf = btrace_conf (btinfo);
920
921 /* We have trace so we must have a configuration. */
922 gdb_assert (conf != NULL);
923
924 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
925
926 continue;
927 }
928
929 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
930 {
931 int level = bfun->level + btinfo->level, i;
932
933 for (i = 0; i < level; ++i)
934 ui_out_text (uiout, " ");
935 }
936
937 if (sym != NULL)
938 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
939 else if (msym != NULL)
940 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
941 else if (!ui_out_is_mi_like_p (uiout))
942 ui_out_field_string (uiout, "function", "??");
943
944 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
945 {
946 ui_out_text (uiout, _("\tinst "));
947 btrace_call_history_insn_range (uiout, bfun);
948 }
949
950 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
951 {
952 ui_out_text (uiout, _("\tat "));
953 btrace_call_history_src_line (uiout, bfun);
954 }
955
956 ui_out_text (uiout, "\n");
957 }
958 }
959
960 /* The to_call_history method of target record-btrace. */
961
962 static void
963 record_btrace_call_history (struct target_ops *self, int size, int flags)
964 {
965 struct btrace_thread_info *btinfo;
966 struct btrace_call_history *history;
967 struct btrace_call_iterator begin, end;
968 struct cleanup *uiout_cleanup;
969 struct ui_out *uiout;
970 unsigned int context, covered;
971
972 uiout = current_uiout;
973 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
974 "insn history");
975 context = abs (size);
976 if (context == 0)
977 error (_("Bad record function-call-history-size."));
978
979 btinfo = require_btrace ();
980 history = btinfo->call_history;
981 if (history == NULL)
982 {
983 struct btrace_insn_iterator *replay;
984
985 DEBUG ("call-history (0x%x): %d", flags, size);
986
987 /* If we're replaying, we start at the replay position. Otherwise, we
988 start at the tail of the trace. */
989 replay = btinfo->replay;
990 if (replay != NULL)
991 {
992 begin.function = replay->function;
993 begin.btinfo = btinfo;
994 }
995 else
996 btrace_call_end (&begin, btinfo);
997
998 /* We start from here and expand in the requested direction. Then we
999 expand in the other direction, as well, to fill up any remaining
1000 context. */
1001 end = begin;
1002 if (size < 0)
1003 {
1004 /* We want the current position covered, as well. */
1005 covered = btrace_call_next (&end, 1);
1006 covered += btrace_call_prev (&begin, context - covered);
1007 covered += btrace_call_next (&end, context - covered);
1008 }
1009 else
1010 {
1011 covered = btrace_call_next (&end, context);
1012 covered += btrace_call_prev (&begin, context - covered);
1013 }
1014 }
1015 else
1016 {
1017 begin = history->begin;
1018 end = history->end;
1019
1020 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1021 btrace_call_number (&begin), btrace_call_number (&end));
1022
1023 if (size < 0)
1024 {
1025 end = begin;
1026 covered = btrace_call_prev (&begin, context);
1027 }
1028 else
1029 {
1030 begin = end;
1031 covered = btrace_call_next (&end, context);
1032 }
1033 }
1034
1035 if (covered > 0)
1036 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1037 else
1038 {
1039 if (size < 0)
1040 printf_unfiltered (_("At the start of the branch trace record.\n"));
1041 else
1042 printf_unfiltered (_("At the end of the branch trace record.\n"));
1043 }
1044
1045 btrace_set_call_history (btinfo, &begin, &end);
1046 do_cleanups (uiout_cleanup);
1047 }
1048
1049 /* The to_call_history_range method of target record-btrace. */
1050
1051 static void
1052 record_btrace_call_history_range (struct target_ops *self,
1053 ULONGEST from, ULONGEST to, int flags)
1054 {
1055 struct btrace_thread_info *btinfo;
1056 struct btrace_call_history *history;
1057 struct btrace_call_iterator begin, end;
1058 struct cleanup *uiout_cleanup;
1059 struct ui_out *uiout;
1060 unsigned int low, high;
1061 int found;
1062
1063 uiout = current_uiout;
1064 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1065 "func history");
1066 low = from;
1067 high = to;
1068
1069 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1070
1071 /* Check for wrap-arounds. */
1072 if (low != from || high != to)
1073 error (_("Bad range."));
1074
1075 if (high < low)
1076 error (_("Bad range."));
1077
1078 btinfo = require_btrace ();
1079
1080 found = btrace_find_call_by_number (&begin, btinfo, low);
1081 if (found == 0)
1082 error (_("Range out of bounds."));
1083
1084 found = btrace_find_call_by_number (&end, btinfo, high);
1085 if (found == 0)
1086 {
1087 /* Silently truncate the range. */
1088 btrace_call_end (&end, btinfo);
1089 }
1090 else
1091 {
1092 /* We want both begin and end to be inclusive. */
1093 btrace_call_next (&end, 1);
1094 }
1095
1096 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1097 btrace_set_call_history (btinfo, &begin, &end);
1098
1099 do_cleanups (uiout_cleanup);
1100 }
1101
1102 /* The to_call_history_from method of target record-btrace. */
1103
1104 static void
1105 record_btrace_call_history_from (struct target_ops *self,
1106 ULONGEST from, int size, int flags)
1107 {
1108 ULONGEST begin, end, context;
1109
1110 context = abs (size);
1111 if (context == 0)
1112 error (_("Bad record function-call-history-size."));
1113
1114 if (size < 0)
1115 {
1116 end = from;
1117
1118 if (from < context)
1119 begin = 0;
1120 else
1121 begin = from - context + 1;
1122 }
1123 else
1124 {
1125 begin = from;
1126 end = from + context - 1;
1127
1128 /* Check for wrap-around. */
1129 if (end < begin)
1130 end = ULONGEST_MAX;
1131 }
1132
1133 record_btrace_call_history_range (self, begin, end, flags);
1134 }
1135
1136 /* The to_record_is_replaying method of target record-btrace. */
1137
1138 static int
1139 record_btrace_is_replaying (struct target_ops *self)
1140 {
1141 struct thread_info *tp;
1142
1143 ALL_NON_EXITED_THREADS (tp)
1144 if (btrace_is_replaying (tp))
1145 return 1;
1146
1147 return 0;
1148 }
1149
1150 /* The to_xfer_partial method of target record-btrace. */
1151
1152 static enum target_xfer_status
1153 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1154 const char *annex, gdb_byte *readbuf,
1155 const gdb_byte *writebuf, ULONGEST offset,
1156 ULONGEST len, ULONGEST *xfered_len)
1157 {
1158 struct target_ops *t;
1159
1160 /* Filter out requests that don't make sense during replay. */
1161 if (replay_memory_access == replay_memory_access_read_only
1162 && !record_btrace_generating_corefile
1163 && record_btrace_is_replaying (ops))
1164 {
1165 switch (object)
1166 {
1167 case TARGET_OBJECT_MEMORY:
1168 {
1169 struct target_section *section;
1170
1171 /* We do not allow writing memory in general. */
1172 if (writebuf != NULL)
1173 {
1174 *xfered_len = len;
1175 return TARGET_XFER_UNAVAILABLE;
1176 }
1177
1178 /* We allow reading readonly memory. */
1179 section = target_section_by_addr (ops, offset);
1180 if (section != NULL)
1181 {
1182 /* Check if the section we found is readonly. */
1183 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1184 section->the_bfd_section)
1185 & SEC_READONLY) != 0)
1186 {
1187 /* Truncate the request to fit into this section. */
1188 len = min (len, section->endaddr - offset);
1189 break;
1190 }
1191 }
1192
1193 *xfered_len = len;
1194 return TARGET_XFER_UNAVAILABLE;
1195 }
1196 }
1197 }
1198
1199 /* Forward the request. */
1200 ops = ops->beneath;
1201 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1202 offset, len, xfered_len);
1203 }
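/* Editor's example (illustrative): while replaying with the default
   "replay-memory-access read-only" setting, a write such as

     (gdb) record goto begin
     (gdb) print variable = 1

   is answered with TARGET_XFER_UNAVAILABLE above, and so are reads of
   writable memory; only reads of SEC_READONLY sections (e.g. code and
   constants) are forwarded to the target beneath.  */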
1204
1205 /* The to_insert_breakpoint method of target record-btrace. */
1206
1207 static int
1208 record_btrace_insert_breakpoint (struct target_ops *ops,
1209 struct gdbarch *gdbarch,
1210 struct bp_target_info *bp_tgt)
1211 {
1212 const char *old;
1213 int ret;
1214
1215 /* Inserting breakpoints requires accessing memory. Allow it for the
1216 duration of this function. */
1217 old = replay_memory_access;
1218 replay_memory_access = replay_memory_access_read_write;
1219
1220 ret = 0;
1221 TRY
1222 {
1223 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1224 }
1225 CATCH (except, RETURN_MASK_ALL)
1226 {
1227 replay_memory_access = old;
1228 throw_exception (except);
1229 }
1230 END_CATCH
1231 replay_memory_access = old;
1232
1233 return ret;
1234 }
1235
1236 /* The to_remove_breakpoint method of target record-btrace. */
1237
1238 static int
1239 record_btrace_remove_breakpoint (struct target_ops *ops,
1240 struct gdbarch *gdbarch,
1241 struct bp_target_info *bp_tgt)
1242 {
1243 const char *old;
1244 int ret;
1245
1246 /* Removing breakpoints requires accessing memory. Allow it for the
1247 duration of this function. */
1248 old = replay_memory_access;
1249 replay_memory_access = replay_memory_access_read_write;
1250
1251 ret = 0;
1252 TRY
1253 {
1254 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1255 }
1256 CATCH (except, RETURN_MASK_ALL)
1257 {
1258 replay_memory_access = old;
1259 throw_exception (except);
1260 }
1261 END_CATCH
1262 replay_memory_access = old;
1263
1264 return ret;
1265 }
1266
1267 /* The to_fetch_registers method of target record-btrace. */
1268
1269 static void
1270 record_btrace_fetch_registers (struct target_ops *ops,
1271 struct regcache *regcache, int regno)
1272 {
1273 struct btrace_insn_iterator *replay;
1274 struct thread_info *tp;
1275
1276 tp = find_thread_ptid (inferior_ptid);
1277 gdb_assert (tp != NULL);
1278
1279 replay = tp->btrace.replay;
1280 if (replay != NULL && !record_btrace_generating_corefile)
1281 {
1282 const struct btrace_insn *insn;
1283 struct gdbarch *gdbarch;
1284 int pcreg;
1285
1286 gdbarch = get_regcache_arch (regcache);
1287 pcreg = gdbarch_pc_regnum (gdbarch);
1288 if (pcreg < 0)
1289 return;
1290
1291 /* We can only provide the PC register. */
1292 if (regno >= 0 && regno != pcreg)
1293 return;
1294
1295 insn = btrace_insn_get (replay);
1296 gdb_assert (insn != NULL);
1297
1298 regcache_raw_supply (regcache, regno, &insn->pc);
1299 }
1300 else
1301 {
1302 struct target_ops *t = ops->beneath;
1303
1304 t->to_fetch_registers (t, regcache, regno);
1305 }
1306 }
1307
1308 /* The to_store_registers method of target record-btrace. */
1309
1310 static void
1311 record_btrace_store_registers (struct target_ops *ops,
1312 struct regcache *regcache, int regno)
1313 {
1314 struct target_ops *t;
1315
1316 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1317 error (_("This record target does not allow writing registers."));
1318
1319 gdb_assert (may_write_registers != 0);
1320
1321 t = ops->beneath;
1322 t->to_store_registers (t, regcache, regno);
1323 }
1324
1325 /* The to_prepare_to_store method of target record-btrace. */
1326
1327 static void
1328 record_btrace_prepare_to_store (struct target_ops *ops,
1329 struct regcache *regcache)
1330 {
1331 struct target_ops *t;
1332
1333 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1334 return;
1335
1336 t = ops->beneath;
1337 t->to_prepare_to_store (t, regcache);
1338 }
1339
1340 /* The branch trace frame cache. */
1341
1342 struct btrace_frame_cache
1343 {
1344 /* The thread. */
1345 struct thread_info *tp;
1346
1347 /* The frame info. */
1348 struct frame_info *frame;
1349
1350 /* The branch trace function segment. */
1351 const struct btrace_function *bfun;
1352 };
1353
1354 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1355
1356 static htab_t bfcache;
1357
1358 /* hash_f for htab_create_alloc of bfcache. */
1359
1360 static hashval_t
1361 bfcache_hash (const void *arg)
1362 {
1363 const struct btrace_frame_cache *cache = arg;
1364
1365 return htab_hash_pointer (cache->frame);
1366 }
1367
1368 /* eq_f for htab_create_alloc of bfcache. */
1369
1370 static int
1371 bfcache_eq (const void *arg1, const void *arg2)
1372 {
1373 const struct btrace_frame_cache *cache1 = arg1;
1374 const struct btrace_frame_cache *cache2 = arg2;
1375
1376 return cache1->frame == cache2->frame;
1377 }
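/* Editor's sketch of how the two callbacks above are wired up, assuming
   the table is created once at initialization time roughly like this:  */
#if 0
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
#endif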
1378
1379 /* Create a new btrace frame cache. */
1380
1381 static struct btrace_frame_cache *
1382 bfcache_new (struct frame_info *frame)
1383 {
1384 struct btrace_frame_cache *cache;
1385 void **slot;
1386
1387 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1388 cache->frame = frame;
1389
1390 slot = htab_find_slot (bfcache, cache, INSERT);
1391 gdb_assert (*slot == NULL);
1392 *slot = cache;
1393
1394 return cache;
1395 }
1396
1397 /* Extract the branch trace function from a branch trace frame. */
1398
1399 static const struct btrace_function *
1400 btrace_get_frame_function (struct frame_info *frame)
1401 {
1402 const struct btrace_frame_cache *cache;
1403 const struct btrace_function *bfun;
1404 struct btrace_frame_cache pattern;
1405 void **slot;
1406
1407 pattern.frame = frame;
1408
1409 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1410 if (slot == NULL)
1411 return NULL;
1412
1413 cache = *slot;
1414 return cache->bfun;
1415 }
1416
1417 /* Implement stop_reason method for record_btrace_frame_unwind. */
1418
1419 static enum unwind_stop_reason
1420 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1421 void **this_cache)
1422 {
1423 const struct btrace_frame_cache *cache;
1424 const struct btrace_function *bfun;
1425
1426 cache = *this_cache;
1427 bfun = cache->bfun;
1428 gdb_assert (bfun != NULL);
1429
1430 if (bfun->up == NULL)
1431 return UNWIND_UNAVAILABLE;
1432
1433 return UNWIND_NO_REASON;
1434 }
1435
1436 /* Implement this_id method for record_btrace_frame_unwind. */
1437
1438 static void
1439 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1440 struct frame_id *this_id)
1441 {
1442 const struct btrace_frame_cache *cache;
1443 const struct btrace_function *bfun;
1444 CORE_ADDR code, special;
1445
1446 cache = *this_cache;
1447
1448 bfun = cache->bfun;
1449 gdb_assert (bfun != NULL);
1450
1451 while (bfun->segment.prev != NULL)
1452 bfun = bfun->segment.prev;
1453
1454 code = get_frame_func (this_frame);
1455 special = bfun->number;
1456
1457 *this_id = frame_id_build_unavailable_stack_special (code, special);
1458
1459 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1460 btrace_get_bfun_name (cache->bfun),
1461 core_addr_to_string_nz (this_id->code_addr),
1462 core_addr_to_string_nz (this_id->special_addr));
1463 }
1464
1465 /* Implement prev_register method for record_btrace_frame_unwind. */
1466
1467 static struct value *
1468 record_btrace_frame_prev_register (struct frame_info *this_frame,
1469 void **this_cache,
1470 int regnum)
1471 {
1472 const struct btrace_frame_cache *cache;
1473 const struct btrace_function *bfun, *caller;
1474 const struct btrace_insn *insn;
1475 struct gdbarch *gdbarch;
1476 CORE_ADDR pc;
1477 int pcreg;
1478
1479 gdbarch = get_frame_arch (this_frame);
1480 pcreg = gdbarch_pc_regnum (gdbarch);
1481 if (pcreg < 0 || regnum != pcreg)
1482 throw_error (NOT_AVAILABLE_ERROR,
1483 _("Registers are not available in btrace record history"));
1484
1485 cache = *this_cache;
1486 bfun = cache->bfun;
1487 gdb_assert (bfun != NULL);
1488
1489 caller = bfun->up;
1490 if (caller == NULL)
1491 throw_error (NOT_AVAILABLE_ERROR,
1492 _("No caller in btrace record history"));
1493
1494 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1495 {
1496 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1497 pc = insn->pc;
1498 }
1499 else
1500 {
1501 insn = VEC_last (btrace_insn_s, caller->insn);
1502 pc = insn->pc;
1503
1504 pc += gdb_insn_length (gdbarch, pc);
1505 }
1506
1507 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1508 btrace_get_bfun_name (bfun), bfun->level,
1509 core_addr_to_string_nz (pc));
1510
1511 return frame_unwind_got_address (this_frame, regnum, pc);
1512 }
1513
1514 /* Implement sniffer method for record_btrace_frame_unwind. */
1515
1516 static int
1517 record_btrace_frame_sniffer (const struct frame_unwind *self,
1518 struct frame_info *this_frame,
1519 void **this_cache)
1520 {
1521 const struct btrace_function *bfun;
1522 struct btrace_frame_cache *cache;
1523 struct thread_info *tp;
1524 struct frame_info *next;
1525
1526 /* THIS_FRAME does not contain a reference to its thread. */
1527 tp = find_thread_ptid (inferior_ptid);
1528 gdb_assert (tp != NULL);
1529
1530 bfun = NULL;
1531 next = get_next_frame (this_frame);
1532 if (next == NULL)
1533 {
1534 const struct btrace_insn_iterator *replay;
1535
1536 replay = tp->btrace.replay;
1537 if (replay != NULL)
1538 bfun = replay->function;
1539 }
1540 else
1541 {
1542 const struct btrace_function *callee;
1543
1544 callee = btrace_get_frame_function (next);
1545 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1546 bfun = callee->up;
1547 }
1548
1549 if (bfun == NULL)
1550 return 0;
1551
1552 DEBUG ("[frame] sniffed frame for %s on level %d",
1553 btrace_get_bfun_name (bfun), bfun->level);
1554
1555 /* This is our frame. Initialize the frame cache. */
1556 cache = bfcache_new (this_frame);
1557 cache->tp = tp;
1558 cache->bfun = bfun;
1559
1560 *this_cache = cache;
1561 return 1;
1562 }
1563
1564 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1565
1566 static int
1567 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1568 struct frame_info *this_frame,
1569 void **this_cache)
1570 {
1571 const struct btrace_function *bfun, *callee;
1572 struct btrace_frame_cache *cache;
1573 struct frame_info *next;
1574
1575 next = get_next_frame (this_frame);
1576 if (next == NULL)
1577 return 0;
1578
1579 callee = btrace_get_frame_function (next);
1580 if (callee == NULL)
1581 return 0;
1582
1583 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1584 return 0;
1585
1586 bfun = callee->up;
1587 if (bfun == NULL)
1588 return 0;
1589
1590 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1591 btrace_get_bfun_name (bfun), bfun->level);
1592
1593 /* This is our frame. Initialize the frame cache. */
1594 cache = bfcache_new (this_frame);
1595 cache->tp = find_thread_ptid (inferior_ptid);
1596 cache->bfun = bfun;
1597
1598 *this_cache = cache;
1599 return 1;
1600 }
1601
1602 static void
1603 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1604 {
1605 struct btrace_frame_cache *cache;
1606 void **slot;
1607
1608 cache = this_cache;
1609
1610 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1611 gdb_assert (slot != NULL);
1612
1613 htab_remove_elt (bfcache, cache);
1614 }
1615
1616 /* btrace recording stores neither previous memory content nor the
1617 contents of the stack frames.  Any unwinding would return erroneous
1618 results as the stack contents no longer match the changed PC value
1619 restored from history.  Therefore this unwinder reports any possibly
1620 unwound registers as <unavailable>.  */
1621
1622 const struct frame_unwind record_btrace_frame_unwind =
1623 {
1624 NORMAL_FRAME,
1625 record_btrace_frame_unwind_stop_reason,
1626 record_btrace_frame_this_id,
1627 record_btrace_frame_prev_register,
1628 NULL,
1629 record_btrace_frame_sniffer,
1630 record_btrace_frame_dealloc_cache
1631 };
1632
1633 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1634 {
1635 TAILCALL_FRAME,
1636 record_btrace_frame_unwind_stop_reason,
1637 record_btrace_frame_this_id,
1638 record_btrace_frame_prev_register,
1639 NULL,
1640 record_btrace_tailcall_frame_sniffer,
1641 record_btrace_frame_dealloc_cache
1642 };
1643
1644 /* Implement the to_get_unwinder method. */
1645
1646 static const struct frame_unwind *
1647 record_btrace_to_get_unwinder (struct target_ops *self)
1648 {
1649 return &record_btrace_frame_unwind;
1650 }
1651
1652 /* Implement the to_get_tailcall_unwinder method. */
1653
1654 static const struct frame_unwind *
1655 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1656 {
1657 return &record_btrace_tailcall_frame_unwind;
1658 }
1659
1660 /* Return a human-readable string for FLAG. */
1661
1662 static const char *
1663 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1664 {
1665 switch (flag)
1666 {
1667 case BTHR_STEP:
1668 return "step";
1669
1670 case BTHR_RSTEP:
1671 return "reverse-step";
1672
1673 case BTHR_CONT:
1674 return "cont";
1675
1676 case BTHR_RCONT:
1677 return "reverse-cont";
1678
1679 case BTHR_STOP:
1680 return "stop";
1681 }
1682
1683 return "<invalid>";
1684 }
1685
1686 /* Indicate that TP should be resumed according to FLAG. */
1687
1688 static void
1689 record_btrace_resume_thread (struct thread_info *tp,
1690 enum btrace_thread_flag flag)
1691 {
1692 struct btrace_thread_info *btinfo;
1693
1694 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1695 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1696
1697 btinfo = &tp->btrace;
1698
1699 /* Fetch the latest branch trace. */
1700 btrace_fetch (tp);
1701
1702 /* A resume request overwrites a preceding resume or stop request. */
1703 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1704 btinfo->flags |= flag;
1705 }
1706
1707 /* Get the current frame for TP. */
1708
1709 static struct frame_info *
1710 get_thread_current_frame (struct thread_info *tp)
1711 {
1712 struct frame_info *frame;
1713 ptid_t old_inferior_ptid;
1714 int executing;
1715
1716 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1717 old_inferior_ptid = inferior_ptid;
1718 inferior_ptid = tp->ptid;
1719
1720 /* Clear the executing flag to allow changes to the current frame.
1721 We are not actually running, yet. We just started a reverse execution
1722 command or a record goto command.
1723 For the latter, EXECUTING is false and this has no effect.
1724 For the former, EXECUTING is true and we're in to_wait, about to
1725 move the thread. Since we need to recompute the stack, we temporarily
1726 set EXECUTING to false. */
1727 executing = is_executing (inferior_ptid);
1728 set_executing (inferior_ptid, 0);
1729
1730 frame = NULL;
1731 TRY
1732 {
1733 frame = get_current_frame ();
1734 }
1735 CATCH (except, RETURN_MASK_ALL)
1736 {
1737 /* Restore the previous execution state. */
1738 set_executing (inferior_ptid, executing);
1739
1740 /* Restore the previous inferior_ptid. */
1741 inferior_ptid = old_inferior_ptid;
1742
1743 throw_exception (except);
1744 }
1745 END_CATCH
1746
1747 /* Restore the previous execution state. */
1748 set_executing (inferior_ptid, executing);
1749
1750 /* Restore the previous inferior_ptid. */
1751 inferior_ptid = old_inferior_ptid;
1752
1753 return frame;
1754 }
1755
1756 /* Start replaying a thread. */
1757
1758 static struct btrace_insn_iterator *
1759 record_btrace_start_replaying (struct thread_info *tp)
1760 {
1761 struct btrace_insn_iterator *replay;
1762 struct btrace_thread_info *btinfo;
1763
1764 btinfo = &tp->btrace;
1765 replay = NULL;
1766
1767 /* We can't start replaying without trace. */
1768 if (btinfo->begin == NULL)
1769 return NULL;
1770
1771 /* GDB stores the current frame_id when stepping in order to detect steps
1772 into subroutines.
1773 Since frames are computed differently when we're replaying, we need to
1774 recompute those stored frames and fix them up so we can still detect
1775 subroutines after we started replaying. */
1776 TRY
1777 {
1778 struct frame_info *frame;
1779 struct frame_id frame_id;
1780 int upd_step_frame_id, upd_step_stack_frame_id;
1781
1782 /* The current frame without replaying - computed via normal unwind. */
1783 frame = get_thread_current_frame (tp);
1784 frame_id = get_frame_id (frame);
1785
1786 /* Check if we need to update any stepping-related frame id's. */
1787 upd_step_frame_id = frame_id_eq (frame_id,
1788 tp->control.step_frame_id);
1789 upd_step_stack_frame_id = frame_id_eq (frame_id,
1790 tp->control.step_stack_frame_id);
1791
1792 /* We start replaying at the end of the branch trace. This corresponds
1793 to the current instruction. */
1794 replay = XNEW (struct btrace_insn_iterator);
1795 btrace_insn_end (replay, btinfo);
1796
1797 /* Skip gaps at the end of the trace. */
1798 while (btrace_insn_get (replay) == NULL)
1799 {
1800 unsigned int steps;
1801
1802 steps = btrace_insn_prev (replay, 1);
1803 if (steps == 0)
1804 error (_("No trace."));
1805 }
1806
1807 /* We're not replaying, yet. */
1808 gdb_assert (btinfo->replay == NULL);
1809 btinfo->replay = replay;
1810
1811 /* Make sure we're not using any stale registers. */
1812 registers_changed_ptid (tp->ptid);
1813
1814 /* The current frame with replaying - computed via btrace unwind. */
1815 frame = get_thread_current_frame (tp);
1816 frame_id = get_frame_id (frame);
1817
1818 /* Replace stepping related frames where necessary. */
1819 if (upd_step_frame_id)
1820 tp->control.step_frame_id = frame_id;
1821 if (upd_step_stack_frame_id)
1822 tp->control.step_stack_frame_id = frame_id;
1823 }
1824 CATCH (except, RETURN_MASK_ALL)
1825 {
1826 xfree (btinfo->replay);
1827 btinfo->replay = NULL;
1828
1829 registers_changed_ptid (tp->ptid);
1830
1831 throw_exception (except);
1832 }
1833 END_CATCH
1834
1835 return replay;
1836 }
1837
1838 /* Stop replaying a thread. */
1839
1840 static void
1841 record_btrace_stop_replaying (struct thread_info *tp)
1842 {
1843 struct btrace_thread_info *btinfo;
1844
1845 btinfo = &tp->btrace;
1846
1847 xfree (btinfo->replay);
1848 btinfo->replay = NULL;
1849
1850 /* Make sure we're not leaving any stale registers. */
1851 registers_changed_ptid (tp->ptid);
1852 }
1853
1854 /* Stop replaying TP if it is at the end of its execution history. */
1855
1856 static void
1857 record_btrace_stop_replaying_at_end (struct thread_info *tp)
1858 {
1859 struct btrace_insn_iterator *replay, end;
1860 struct btrace_thread_info *btinfo;
1861
1862 btinfo = &tp->btrace;
1863 replay = btinfo->replay;
1864
1865 if (replay == NULL)
1866 return;
1867
1868 btrace_insn_end (&end, btinfo);
1869
1870 if (btrace_insn_cmp (replay, &end) == 0)
1871 record_btrace_stop_replaying (tp);
1872 }
1873
1874 /* The to_resume method of target record-btrace. */
1875
1876 static void
1877 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1878 enum gdb_signal signal)
1879 {
1880 struct thread_info *tp;
1881 enum btrace_thread_flag flag;
1882 ptid_t orig_ptid;
1883
1884 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
1885 execution_direction == EXEC_REVERSE ? "reverse-" : "",
1886 step ? "step" : "cont");
1887
1888 orig_ptid = ptid;
1889
1890 /* Store the execution direction of the last resume.
1891
1892 If there is more than one to_resume call, we have to rely on infrun
1893 to not change the execution direction in-between. */
1894 record_btrace_resume_exec_dir = execution_direction;
1895
1896 /* For all-stop targets... */
1897 if (!target_is_non_stop_p ())
1898 {
1899 /* ...we pick the current thread when asked to resume an entire process
1900 or everything. */
1901 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1902 ptid = inferior_ptid;
1903
1904 tp = find_thread_ptid (ptid);
1905 if (tp == NULL)
1906 error (_("Cannot find thread to resume."));
1907
1908 /* ...and we stop replaying other threads if the thread to resume is not
1909 replaying. */
1910 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1911 ALL_NON_EXITED_THREADS (tp)
1912 record_btrace_stop_replaying (tp);
1913 }
1914
1915 /* As long as we're not replaying, just forward the request.
1916
1917 For non-stop targets this means that no thread is replaying. In order to
1918 make progress, we may need to explicitly move replaying threads to the end
1919 of their execution history. */
1920 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1921 {
1922 ops = ops->beneath;
1923 return ops->to_resume (ops, orig_ptid, step, signal);
1924 }
1925
1926 /* Compute the btrace thread flag for the requested move. */
1927 if (step == 0)
1928 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1929 else
1930 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1931
1932 /* We just indicate the resume intent here. The actual stepping happens in
1933 record_btrace_wait below. */
1934 ALL_NON_EXITED_THREADS (tp)
1935 if (ptid_match (tp->ptid, ptid))
1936 record_btrace_resume_thread (tp, flag);
1937
1938 /* Async support. */
1939 if (target_can_async_p ())
1940 {
1941 target_async (1);
1942 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1943 }
1944 }
1945
1946 /* Cancel resuming TP. */
1947
1948 static void
1949 record_btrace_cancel_resume (struct thread_info *tp)
1950 {
1951 enum btrace_thread_flag flags;
1952
1953 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1954 if (flags == 0)
1955 return;
1956
1957 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1958 target_pid_to_str (tp->ptid), flags,
1959 btrace_thread_flag_to_str (flags));
1960
1961 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
1962 record_btrace_stop_replaying_at_end (tp);
1963 }
1964
1965 /* Return a target_waitstatus indicating that we ran out of history. */
1966
1967 static struct target_waitstatus
1968 btrace_step_no_history (void)
1969 {
1970 struct target_waitstatus status;
1971
1972 status.kind = TARGET_WAITKIND_NO_HISTORY;
1973
1974 return status;
1975 }
1976
1977 /* Return a target_waitstatus indicating that a step finished. */
1978
1979 static struct target_waitstatus
1980 btrace_step_stopped (void)
1981 {
1982 struct target_waitstatus status;
1983
1984 status.kind = TARGET_WAITKIND_STOPPED;
1985 status.value.sig = GDB_SIGNAL_TRAP;
1986
1987 return status;
1988 }
1989
1990 /* Return a target_waitstatus indicating that a thread was stopped as
1991 requested. */
1992
1993 static struct target_waitstatus
1994 btrace_step_stopped_on_request (void)
1995 {
1996 struct target_waitstatus status;
1997
1998 status.kind = TARGET_WAITKIND_STOPPED;
1999 status.value.sig = GDB_SIGNAL_0;
2000
2001 return status;
2002 }
2003
2004 /* Return a target_waitstatus indicating a spurious stop. */
2005
2006 static struct target_waitstatus
2007 btrace_step_spurious (void)
2008 {
2009 struct target_waitstatus status;
2010
2011 status.kind = TARGET_WAITKIND_SPURIOUS;
2012
2013 return status;
2014 }
2015
2016 /* Return a target_waitstatus indicating that the thread was not resumed. */
2017
2018 static struct target_waitstatus
2019 btrace_step_no_resumed (void)
2020 {
2021 struct target_waitstatus status;
2022
2023 status.kind = TARGET_WAITKIND_NO_RESUMED;
2024
2025 return status;
2026 }
2027
2028 /* Return a target_waitstatus indicating that we should wait again. */
2029
2030 static struct target_waitstatus
2031 btrace_step_again (void)
2032 {
2033 struct target_waitstatus status;
2034
2035 status.kind = TARGET_WAITKIND_IGNORE;
2036
2037 return status;
2038 }
2039
2040 /* Clear the record histories. */
2041
2042 static void
2043 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2044 {
2045 xfree (btinfo->insn_history);
2046 xfree (btinfo->call_history);
2047
2048 btinfo->insn_history = NULL;
2049 btinfo->call_history = NULL;
2050 }
2051
2052 /* Check whether TP's current replay position is at a breakpoint. */
2053
2054 static int
2055 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2056 {
2057 struct btrace_insn_iterator *replay;
2058 struct btrace_thread_info *btinfo;
2059 const struct btrace_insn *insn;
2060 struct inferior *inf;
2061
2062 btinfo = &tp->btrace;
2063 replay = btinfo->replay;
2064
2065 if (replay == NULL)
2066 return 0;
2067
2068 insn = btrace_insn_get (replay);
2069 if (insn == NULL)
2070 return 0;
2071
2072 inf = find_inferior_ptid (tp->ptid);
2073 if (inf == NULL)
2074 return 0;
2075
2076 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2077 &btinfo->stop_reason);
2078 }
2079
2080 /* Step one instruction in forward direction. */
2081
2082 static struct target_waitstatus
2083 record_btrace_single_step_forward (struct thread_info *tp)
2084 {
2085 struct btrace_insn_iterator *replay, end;
2086 struct btrace_thread_info *btinfo;
2087
2088 btinfo = &tp->btrace;
2089 replay = btinfo->replay;
2090
2091 /* We're done if we're not replaying. */
2092 if (replay == NULL)
2093 return btrace_step_no_history ();
2094
2095 /* Check if we're stepping a breakpoint. */
2096 if (record_btrace_replay_at_breakpoint (tp))
2097 return btrace_step_stopped ();
2098
2099 /* Skip gaps during replay. */
2100 do
2101 {
2102 unsigned int steps;
2103
2104 /* We will bail out here if we continue stepping after reaching the end
2105 of the execution history. */
2106 steps = btrace_insn_next (replay, 1);
2107 if (steps == 0)
2108 return btrace_step_no_history ();
2109 }
2110 while (btrace_insn_get (replay) == NULL);
2111
2112 /* Determine the end of the instruction trace. */
2113 btrace_insn_end (&end, btinfo);
2114
2115 /* The execution trace contains (and ends with) the current instruction.
2116 This instruction has not been executed, yet, so the trace really ends
2117 one instruction earlier. */
2118 if (btrace_insn_cmp (replay, &end) == 0)
2119 return btrace_step_no_history ();
2120
2121 return btrace_step_spurious ();
2122 }
2123
2124 /* Step one instruction in backward direction. */
2125
2126 static struct target_waitstatus
2127 record_btrace_single_step_backward (struct thread_info *tp)
2128 {
2129 struct btrace_insn_iterator *replay;
2130 struct btrace_thread_info *btinfo;
2131
2132 btinfo = &tp->btrace;
2133 replay = btinfo->replay;
2134
2135 /* Start replaying if we're not already doing so. */
2136 if (replay == NULL)
2137 replay = record_btrace_start_replaying (tp);
2138
2139 /* If we can't step any further, we reached the end of the history.
2140 Skip gaps during replay. */
2141 do
2142 {
2143 unsigned int steps;
2144
2145 steps = btrace_insn_prev (replay, 1);
2146 if (steps == 0)
2147 return btrace_step_no_history ();
2148 }
2149 while (btrace_insn_get (replay) == NULL);
2150
2151 /* Check if we're stepping a breakpoint.
2152
2153 For reverse-stepping, this check is after the step. There is logic in
2154 infrun.c that handles reverse-stepping separately. See, for example,
2155 proceed and adjust_pc_after_break.
2156
2157 This code assumes that for reverse-stepping, PC points to the last
2158 de-executed instruction, whereas for forward-stepping PC points to the
2159 next to-be-executed instruction. */
2160 if (record_btrace_replay_at_breakpoint (tp))
2161 return btrace_step_stopped ();
2162
2163 return btrace_step_spurious ();
2164 }
2165
2166 /* Step a single thread. */
2167
2168 static struct target_waitstatus
2169 record_btrace_step_thread (struct thread_info *tp)
2170 {
2171 struct btrace_thread_info *btinfo;
2172 struct target_waitstatus status;
2173 enum btrace_thread_flag flags;
2174
2175 btinfo = &tp->btrace;
2176
2177 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2178 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
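	/* The MOVE and STOP bits are consumed here; the BTHR_CONT and
	   BTHR_RCONT cases and the no-history path below re-arm them so the
	   thread keeps moving on the next iteration of the to_wait stepping
	   loop.  */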
2179
2180 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2181 target_pid_to_str (tp->ptid), flags,
2182 btrace_thread_flag_to_str (flags));
2183
2184 /* We can't step without an execution history. */
2185 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2186 return btrace_step_no_history ();
2187
2188 switch (flags)
2189 {
2190 default:
2191 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2192
2193 case BTHR_STOP:
2194 return btrace_step_stopped_on_request ();
2195
2196 case BTHR_STEP:
2197 status = record_btrace_single_step_forward (tp);
2198 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2199 break;
2200
2201 return btrace_step_stopped ();
2202
2203 case BTHR_RSTEP:
2204 status = record_btrace_single_step_backward (tp);
2205 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2206 break;
2207
2208 return btrace_step_stopped ();
2209
2210 case BTHR_CONT:
2211 status = record_btrace_single_step_forward (tp);
2212 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2213 break;
2214
2215 btinfo->flags |= flags;
2216 return btrace_step_again ();
2217
2218 case BTHR_RCONT:
2219 status = record_btrace_single_step_backward (tp);
2220 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2221 break;
2222
2223 btinfo->flags |= flags;
2224 return btrace_step_again ();
2225 }
2226
2227 /* We keep threads moving at the end of their execution history. The to_wait
2228 method will stop the thread for which the event is reported. */
2229 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2230 btinfo->flags |= flags;
2231
2232 return status;
2233 }
2234
2235 /* A vector of threads. */
2236
2237 typedef struct thread_info *tp_t;
2238 DEF_VEC_P (tp_t);
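/* These vectors serve as work lists in record_btrace_wait below; see vec.h
   for the VEC_safe_push, VEC_ordered_remove and VEC_unordered_remove
   primitives used on them.  */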
2239
2240 /* Announce further events if necessary. */
2241
2242 static void
2243 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2244 const VEC (tp_t) *no_history)
2245 {
2246 int more_moving, more_no_history;
2247
2248 more_moving = !VEC_empty (tp_t, moving);
2249 more_no_history = !VEC_empty (tp_t, no_history);
2250
2251 if (!more_moving && !more_no_history)
2252 return;
2253
2254 if (more_moving)
2255 DEBUG ("movers pending");
2256
2257 if (more_no_history)
2258 DEBUG ("no-history pending");
2259
2260 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2261 }
2262
2263 /* The to_wait method of target record-btrace. */
2264
2265 static ptid_t
2266 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2267 struct target_waitstatus *status, int options)
2268 {
2269 VEC (tp_t) *moving, *no_history;
2270 struct thread_info *tp, *eventing;
2271 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2272
2273 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2274
2275 /* As long as we're not replaying, just forward the request. */
2276 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2277 {
2278 ops = ops->beneath;
2279 return ops->to_wait (ops, ptid, status, options);
2280 }
2281
2282 moving = NULL;
2283 no_history = NULL;
2284
2285 make_cleanup (VEC_cleanup (tp_t), &moving);
2286 make_cleanup (VEC_cleanup (tp_t), &no_history);
2287
2288 /* Keep a work list of moving threads. */
2289 ALL_NON_EXITED_THREADS (tp)
2290 if (ptid_match (tp->ptid, ptid)
2291 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2292 VEC_safe_push (tp_t, moving, tp);
2293
2294 if (VEC_empty (tp_t, moving))
2295 {
2296 *status = btrace_step_no_resumed ();
2297
2298 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2299 target_waitstatus_to_string (status));
2300
2301 do_cleanups (cleanups);
2302 return null_ptid;
2303 }
2304
2305 /* Step moving threads one by one, one step each, until either one thread
2306 reports an event or we run out of threads to step.
2307
2308 When stepping more than one thread, chances are that some threads reach
2309 the end of their execution history earlier than others. If we reported
2310 this immediately, all-stop on top of non-stop would stop all threads and
2311 resume the same threads next time. And we would report the same thread
2312 having reached the end of its execution history again.
2313
2314 In the worst case, this would starve the other threads. But even if other
2315 threads would be allowed to make progress, this would result in far too
2316 many intermediate stops.
2317
2318 We therefore delay the reporting of "no execution history" until we have
2319 nothing else to report. By this time, all threads should have moved to
2320 either the beginning or the end of their execution history. There will
2321 be a single user-visible stop. */
2322 eventing = NULL;
2323 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2324 {
2325 unsigned int ix;
2326
2327 ix = 0;
2328 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2329 {
2330 *status = record_btrace_step_thread (tp);
2331
2332 switch (status->kind)
2333 {
2334 case TARGET_WAITKIND_IGNORE:
2335 ix++;
2336 break;
2337
2338 case TARGET_WAITKIND_NO_HISTORY:
2339 VEC_safe_push (tp_t, no_history,
2340 VEC_ordered_remove (tp_t, moving, ix));
2341 break;
2342
2343 default:
2344 eventing = VEC_unordered_remove (tp_t, moving, ix);
2345 break;
2346 }
2347 }
2348 }
2349
2350 if (eventing == NULL)
2351 {
2352 /* We started with at least one moving thread. This thread must have
2353 either stopped or reached the end of its execution history.
2354
2355 In the former case, EVENTING must not be NULL.
2356 In the latter case, NO_HISTORY must not be empty. */
2357 gdb_assert (!VEC_empty (tp_t, no_history));
2358
2359 /* We kept threads moving at the end of their execution history. Stop
2360 EVENTING now that we are going to report its stop. */
2361 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2362 eventing->btrace.flags &= ~BTHR_MOVE;
2363
2364 *status = btrace_step_no_history ();
2365 }
2366
2367 gdb_assert (eventing != NULL);
2368
2369 /* We kept threads replaying at the end of their execution history. Stop
2370 replaying EVENTING now that we are going to report its stop. */
2371 record_btrace_stop_replaying_at_end (eventing);
2372
2373 /* Stop all other threads. */
2374 if (!target_is_non_stop_p ())
2375 ALL_NON_EXITED_THREADS (tp)
2376 record_btrace_cancel_resume (tp);
2377
2378 /* In async mode, we need to announce further events. */
2379 if (target_is_async_p ())
2380 record_btrace_maybe_mark_async_event (moving, no_history);
2381
2382 /* Start record histories anew from the current position. */
2383 record_btrace_clear_histories (&eventing->btrace);
2384
2385 /* We moved the replay position but did not update registers. */
2386 registers_changed_ptid (eventing->ptid);
2387
2388 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2389 target_pid_to_str (eventing->ptid),
2390 target_waitstatus_to_string (status));
2391
2392 do_cleanups (cleanups);
2393 return eventing->ptid;
2394 }
2395
2396 /* The to_stop method of target record-btrace. */
2397
2398 static void
2399 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2400 {
2401 DEBUG ("stop %s", target_pid_to_str (ptid));
2402
2403 /* As long as we're not replaying, just forward the request. */
2404 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2405 {
2406 ops = ops->beneath;
2407 ops->to_stop (ops, ptid);
2408 }
2409 else
2410 {
2411 struct thread_info *tp;
2412
2413 ALL_NON_EXITED_THREADS (tp)
2414 if (ptid_match (tp->ptid, ptid))
2415 {
2416 tp->btrace.flags &= ~BTHR_MOVE;
2417 tp->btrace.flags |= BTHR_STOP;
2418 }
2419 }
2420 }
2421
2422 /* The to_can_execute_reverse method of target record-btrace. */
2423
2424 static int
2425 record_btrace_can_execute_reverse (struct target_ops *self)
2426 {
2427 return 1;
2428 }
2429
2430 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2431
2432 static int
2433 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2434 {
2435 if (record_btrace_is_replaying (ops))
2436 {
2437 struct thread_info *tp = inferior_thread ();
2438
2439 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2440 }
2441
2442 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2443 }
2444
2445 /* The to_supports_stopped_by_sw_breakpoint method of target
2446 record-btrace. */
2447
2448 static int
2449 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2450 {
2451 if (record_btrace_is_replaying (ops))
2452 return 1;
2453
2454 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2455 }
2456
2457 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2458
2459 static int
2460 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2461 {
2462 if (record_btrace_is_replaying (ops))
2463 {
2464 struct thread_info *tp = inferior_thread ();
2465
2466 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2467 }
2468
2469 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2470 }
2471
2472 /* The to_supports_stopped_by_hw_breakpoint method of target
2473 record-btrace. */
2474
2475 static int
2476 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2477 {
2478 if (record_btrace_is_replaying (ops))
2479 return 1;
2480
2481 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2482 }
2483
2484 /* The to_update_thread_list method of target record-btrace. */
2485
2486 static void
2487 record_btrace_update_thread_list (struct target_ops *ops)
2488 {
2489 /* We don't add or remove threads during replay. */
2490 if (record_btrace_is_replaying (ops))
2491 return;
2492
2493 /* Forward the request. */
2494 ops = ops->beneath;
2495 ops->to_update_thread_list (ops);
2496 }
2497
2498 /* The to_thread_alive method of target record-btrace. */
2499
2500 static int
2501 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2502 {
2503 /* We don't add or remove threads during replay. */
2504 if (record_btrace_is_replaying (ops))
2505 return find_thread_ptid (ptid) != NULL;
2506
2507 /* Forward the request. */
2508 ops = ops->beneath;
2509 return ops->to_thread_alive (ops, ptid);
2510 }
2511
2512 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2513 is stopped. */
2514
2515 static void
2516 record_btrace_set_replay (struct thread_info *tp,
2517 const struct btrace_insn_iterator *it)
2518 {
2519 struct btrace_thread_info *btinfo;
2520
2521 btinfo = &tp->btrace;
2522
2523 if (it == NULL || it->function == NULL)
2524 record_btrace_stop_replaying (tp);
2525 else
2526 {
2527 if (btinfo->replay == NULL)
2528 record_btrace_start_replaying (tp);
2529 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2530 return;
2531
2532 *btinfo->replay = *it;
2533 registers_changed_ptid (tp->ptid);
2534 }
2535
2536 /* Start anew from the new replay position. */
2537 record_btrace_clear_histories (btinfo);
2538
2539 stop_pc = regcache_read_pc (get_current_regcache ());
2540 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2541 }
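/* record_btrace_set_replay is the common helper behind the
   to_goto_record_begin, to_goto_record_end and to_goto_record methods
   below; note that it reports the new replay position like a normal stop,
   via print_stack_frame.  */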
2542
2543 /* The to_goto_record_begin method of target record-btrace. */
2544
2545 static void
2546 record_btrace_goto_begin (struct target_ops *self)
2547 {
2548 struct thread_info *tp;
2549 struct btrace_insn_iterator begin;
2550
2551 tp = require_btrace_thread ();
2552
2553 btrace_insn_begin (&begin, &tp->btrace);
2554 record_btrace_set_replay (tp, &begin);
2555 }
2556
2557 /* The to_goto_record_end method of target record-btrace. */
2558
2559 static void
2560 record_btrace_goto_end (struct target_ops *ops)
2561 {
2562 struct thread_info *tp;
2563
2564 tp = require_btrace_thread ();
2565
2566 record_btrace_set_replay (tp, NULL);
2567 }
2568
2569 /* The to_goto_record method of target record-btrace. */
2570
2571 static void
2572 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2573 {
2574 struct thread_info *tp;
2575 struct btrace_insn_iterator it;
2576 unsigned int number;
2577 int found;
2578
2579 number = insn;
2580
2581 /* Check for wrap-arounds. */
2582 if (number != insn)
2583 error (_("Instruction number out of range."));
2584
2585 tp = require_btrace_thread ();
2586
2587 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2588 if (found == 0)
2589 error (_("No such instruction."));
2590
2591 record_btrace_set_replay (tp, &it);
2592 }
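/* Illustrative CLI usage (instruction numbers are those shown by the
   "record instruction-history" command):

     (gdb) record goto begin
     (gdb) record goto 42
     (gdb) record goto end  */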
2593
2594 /* The to_execution_direction target method. */
2595
2596 static enum exec_direction_kind
2597 record_btrace_execution_direction (struct target_ops *self)
2598 {
2599 return record_btrace_resume_exec_dir;
2600 }
2601
2602 /* The to_prepare_to_generate_core target method. */
2603
2604 static void
2605 record_btrace_prepare_to_generate_core (struct target_ops *self)
2606 {
2607 record_btrace_generating_corefile = 1;
2608 }
2609
2610 /* The to_done_generating_core target method. */
2611
2612 static void
2613 record_btrace_done_generating_core (struct target_ops *self)
2614 {
2615 record_btrace_generating_corefile = 0;
2616 }
2617
2618 /* Initialize the record-btrace target ops. */
2619
2620 static void
2621 init_record_btrace_ops (void)
2622 {
2623 struct target_ops *ops;
2624
2625 ops = &record_btrace_ops;
2626 ops->to_shortname = "record-btrace";
2627 ops->to_longname = "Branch tracing target";
2628 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2629 ops->to_open = record_btrace_open;
2630 ops->to_close = record_btrace_close;
2631 ops->to_async = record_btrace_async;
2632 ops->to_detach = record_detach;
2633 ops->to_disconnect = record_disconnect;
2634 ops->to_mourn_inferior = record_mourn_inferior;
2635 ops->to_kill = record_kill;
2636 ops->to_stop_recording = record_btrace_stop_recording;
2637 ops->to_info_record = record_btrace_info;
2638 ops->to_insn_history = record_btrace_insn_history;
2639 ops->to_insn_history_from = record_btrace_insn_history_from;
2640 ops->to_insn_history_range = record_btrace_insn_history_range;
2641 ops->to_call_history = record_btrace_call_history;
2642 ops->to_call_history_from = record_btrace_call_history_from;
2643 ops->to_call_history_range = record_btrace_call_history_range;
2644 ops->to_record_is_replaying = record_btrace_is_replaying;
2645 ops->to_xfer_partial = record_btrace_xfer_partial;
2646 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2647 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2648 ops->to_fetch_registers = record_btrace_fetch_registers;
2649 ops->to_store_registers = record_btrace_store_registers;
2650 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2651 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2652 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2653 ops->to_resume = record_btrace_resume;
2654 ops->to_wait = record_btrace_wait;
2655 ops->to_stop = record_btrace_stop;
2656 ops->to_update_thread_list = record_btrace_update_thread_list;
2657 ops->to_thread_alive = record_btrace_thread_alive;
2658 ops->to_goto_record_begin = record_btrace_goto_begin;
2659 ops->to_goto_record_end = record_btrace_goto_end;
2660 ops->to_goto_record = record_btrace_goto;
2661 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2662 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2663 ops->to_supports_stopped_by_sw_breakpoint
2664 = record_btrace_supports_stopped_by_sw_breakpoint;
2665 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2666 ops->to_supports_stopped_by_hw_breakpoint
2667 = record_btrace_supports_stopped_by_hw_breakpoint;
2668 ops->to_execution_direction = record_btrace_execution_direction;
2669 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2670 ops->to_done_generating_core = record_btrace_done_generating_core;
2671 ops->to_stratum = record_stratum;
2672 ops->to_magic = OPS_MAGIC;
2673 }
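/* Target methods not explicitly set above are filled in with defaults or
   with delegation to the target beneath when the target is registered; see
   the target method delegation machinery in target.c.  */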
2674
2675 /* Start recording in BTS format. */
2676
2677 static void
2678 cmd_record_btrace_bts_start (char *args, int from_tty)
2679 {
2680 if (args != NULL && *args != 0)
2681 error (_("Invalid argument."));
2682
2683 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2684
2685 TRY
2686 {
2687 execute_command ("target record-btrace", from_tty);
2688 }
2689 CATCH (exception, RETURN_MASK_ALL)
2690 {
2691 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2692 throw_exception (exception);
2693 }
2694 END_CATCH
2695 }
2696
2697 /* Start recording Intel(R) Processor Trace. */
2698
2699 static void
2700 cmd_record_btrace_pt_start (char *args, int from_tty)
2701 {
2702 if (args != NULL && *args != 0)
2703 error (_("Invalid argument."));
2704
2705 record_btrace_conf.format = BTRACE_FORMAT_PT;
2706
2707 TRY
2708 {
2709 execute_command ("target record-btrace", from_tty);
2710 }
2711 CATCH (exception, RETURN_MASK_ALL)
2712 {
2713 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2714 throw_exception (exception);
2715 }
2716 END_CATCH
2717 }
2718
2719 /* Start recording, trying Intel(R) Processor Trace first, then BTS. */
2720
2721 static void
2722 cmd_record_btrace_start (char *args, int from_tty)
2723 {
2724 if (args != NULL && *args != 0)
2725 error (_("Invalid argument."));
2726
2727 record_btrace_conf.format = BTRACE_FORMAT_PT;
2728
2729 TRY
2730 {
2731 execute_command ("target record-btrace", from_tty);
2732 }
2733 CATCH (exception, RETURN_MASK_ALL)
2734 {
2735 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2736
2737 TRY
2738 {
2739 execute_command ("target record-btrace", from_tty);
2740 }
2741 CATCH (exception, RETURN_MASK_ALL)
2742 {
2743 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2744 throw_exception (exception);
2745 }
2746 END_CATCH
2747 }
2748 END_CATCH
2749 }
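/* Illustrative fallback (on hardware without Intel(R) Processor Trace
   support): "record btrace" first tries "target record-btrace" with
   BTRACE_FORMAT_PT; if that throws, it retries with BTRACE_FORMAT_BTS and
   only gives up if both fail.  */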
2750
2751 /* The "set record btrace" command. */
2752
2753 static void
2754 cmd_set_record_btrace (char *args, int from_tty)
2755 {
2756 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2757 }
2758
2759 /* The "show record btrace" command. */
2760
2761 static void
2762 cmd_show_record_btrace (char *args, int from_tty)
2763 {
2764 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2765 }
2766
2767 /* The "show record btrace replay-memory-access" command. */
2768
2769 static void
2770 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2771 struct cmd_list_element *c, const char *value)
2772 {
2773 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2774 replay_memory_access);
2775 }
2776
2777 /* The "set record btrace bts" command. */
2778
2779 static void
2780 cmd_set_record_btrace_bts (char *args, int from_tty)
2781 {
2782 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2783 "by an appropriate subcommand.\n"));
2784 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2785 all_commands, gdb_stdout);
2786 }
2787
2788 /* The "show record btrace bts" command. */
2789
2790 static void
2791 cmd_show_record_btrace_bts (char *args, int from_tty)
2792 {
2793 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2794 }
2795
2796 /* The "set record btrace pt" command. */
2797
2798 static void
2799 cmd_set_record_btrace_pt (char *args, int from_tty)
2800 {
2801 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2802 "by an appropriate subcommand.\n"));
2803 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2804 all_commands, gdb_stdout);
2805 }
2806
2807 /* The "show record btrace pt" command. */
2808
2809 static void
2810 cmd_show_record_btrace_pt (char *args, int from_tty)
2811 {
2812 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2813 }
2814
2815 /* The "record bts buffer-size" show value function. */
2816
2817 static void
2818 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
2819 struct cmd_list_element *c,
2820 const char *value)
2821 {
2822 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
2823 value);
2824 }
2825
2826 /* The "record pt buffer-size" show value function. */
2827
2828 static void
2829 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
2830 struct cmd_list_element *c,
2831 const char *value)
2832 {
2833 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
2834 value);
2835 }
2836
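/* Provide a prototype to silence -Wmissing-prototypes; the function itself
   is called from the generated init.c.  */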
2837 void _initialize_record_btrace (void);
2838
2839 /* Initialize btrace commands. */
2840
2841 void
2842 _initialize_record_btrace (void)
2843 {
2844 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2845 _("Start branch trace recording."), &record_btrace_cmdlist,
2846 "record btrace ", 0, &record_cmdlist);
2847 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2848
2849 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
2850 _("\
2851 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
2852 The processor stores a from/to record for each branch into a cyclic buffer.\n\
2853 This format may not be available on all processors."),
2854 &record_btrace_cmdlist);
2855 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
2856
2857 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
2858 _("\
2859 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
2860 This format may not be available on all processors."),
2861 &record_btrace_cmdlist);
2862 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
2863
2864 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2865 _("Set record options"), &set_record_btrace_cmdlist,
2866 "set record btrace ", 0, &set_record_cmdlist);
2867
2868 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2869 _("Show record options"), &show_record_btrace_cmdlist,
2870 "show record btrace ", 0, &show_record_cmdlist);
2871
2872 add_setshow_enum_cmd ("replay-memory-access", no_class,
2873 replay_memory_access_types, &replay_memory_access, _("\
2874 Set what memory accesses are allowed during replay."), _("\
2875 Show what memory accesses are allowed during replay."),
2876 _("Default is READ-ONLY.\n\n\
2877 The btrace record target does not trace data.\n\
2878 The memory therefore corresponds to the live target and not \
2879 to the current replay position.\n\n\
2880 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2881 When READ-WRITE, allow accesses to read-only and read-write memory during \
2882 replay."),
2883 NULL, cmd_show_replay_memory_access,
2884 &set_record_btrace_cmdlist,
2885 &show_record_btrace_cmdlist);
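/* Example:

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access
     Replay memory access is read-write.  */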
2886
2887 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
2888 _("Set record btrace bts options"),
2889 &set_record_btrace_bts_cmdlist,
2890 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
2891
2892 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
2893 _("Show record btrace bts options"),
2894 &show_record_btrace_bts_cmdlist,
2895 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
2896
2897 add_setshow_uinteger_cmd ("buffer-size", no_class,
2898 &record_btrace_conf.bts.size,
2899 _("Set the record/replay bts buffer size."),
2900 _("Show the record/replay bts buffer size."), _("\
2901 When starting recording request a trace buffer of this size. \
2902 The actual buffer size may differ from the requested size. \
2903 Use \"info record\" to see the actual buffer size.\n\n\
2904 Bigger buffers allow longer recording but also take more time to process \
2905 the recorded execution trace.\n\n\
2906 The trace buffer size may not be changed while recording."), NULL,
2907 show_record_bts_buffer_size_value,
2908 &set_record_btrace_bts_cmdlist,
2909 &show_record_btrace_bts_cmdlist);
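/* Example (the size is interpreted as bytes; 131072 is illustrative):

     (gdb) set record btrace bts buffer-size 131072  */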
2910
2911 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
2912 _("Set record btrace pt options"),
2913 &set_record_btrace_pt_cmdlist,
2914 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
2915
2916 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
2917 _("Show record btrace pt options"),
2918 &show_record_btrace_pt_cmdlist,
2919 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
2920
2921 add_setshow_uinteger_cmd ("buffer-size", no_class,
2922 &record_btrace_conf.pt.size,
2923 _("Set the record/replay pt buffer size."),
2924 _("Show the record/replay pt buffer size."), _("\
2925 Bigger buffers allow longer recording but also take more time to process \
2926 the recorded execution trace.\n\
2927 The actual buffer size may differ from the requested size. Use \"info record\" \
2928 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
2929 &set_record_btrace_pt_cmdlist,
2930 &show_record_btrace_pt_cmdlist);
2931
2932 init_record_btrace_ops ();
2933 add_target (&record_btrace_ops);
2934
2935 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2936 xcalloc, xfree);
2937
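/* Default trace buffer sizes in bytes: 64 KiB for BTS, 16 KiB for PT.  */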
2938 record_btrace_conf.bts.size = 64 * 1024;
2939 record_btrace_conf.pt.size = 16 * 1024;
2940 }