/* gdb/record-btrace.c — branch trace support for GDB (binutils-gdb).  */
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
43
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
46
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
51 {
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
54 NULL
55 };
56
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access = replay_memory_access_read_only;
59
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element *set_record_btrace_cmdlist;
62 static struct cmd_list_element *show_record_btrace_cmdlist;
63
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
66
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
69
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile;
72
73 /* The current branch trace configuration. */
74 static struct btrace_config record_btrace_conf;
75
76 /* Command list for "record btrace". */
77 static struct cmd_list_element *record_btrace_cmdlist;
78
79 /* Command lists for "set/show record btrace bts". */
80 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
81 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
82
83 /* Command lists for "set/show record btrace pt". */
84 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
85 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
86
87 /* Print a record-btrace debug message. Use do ... while (0) to avoid
88 ambiguities when used in if statements. */
89
90 #define DEBUG(msg, args...) \
91 do \
92 { \
93 if (record_debug != 0) \
94 fprintf_unfiltered (gdb_stdlog, \
95 "[record-btrace] " msg "\n", ##args); \
96 } \
97 while (0)
98
99
100 /* Update the branch trace for the current thread and return a pointer to its
101 thread_info.
102
103 Throws an error if there is no thread or no trace. This function never
104 returns NULL. */
105
106 static struct thread_info *
107 require_btrace_thread (void)
108 {
109 struct thread_info *tp;
110
111 DEBUG ("require");
112
113 tp = find_thread_ptid (inferior_ptid);
114 if (tp == NULL)
115 error (_("No thread."));
116
117 btrace_fetch (tp);
118
119 if (btrace_is_empty (tp))
120 error (_("No trace."));
121
122 return tp;
123 }
124
125 /* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
127
128 Throws an error if there is no thread or no trace. This function never
129 returns NULL. */
130
131 static struct btrace_thread_info *
132 require_btrace (void)
133 {
134 struct thread_info *tp;
135
136 tp = require_btrace_thread ();
137
138 return &tp->btrace;
139 }
140
141 /* Enable branch tracing for one thread. Warn on errors. */
142
static void
record_btrace_enable_warn (struct thread_info *tp)
{
  /* Enable branch tracing for TP with the current record-btrace
     configuration.  Failures are downgraded to warnings so that an
     untraceable thread does not abort the caller (e.g. the new-thread
     observer).  */
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
156
157 /* Callback function to disable branch tracing for one thread. */
158
static void
record_btrace_disable_callback (void *arg)
{
  /* ARG is really the thread whose tracing should be disabled; this
     signature is dictated by the cleanup callback interface.  */
  struct thread_info *tp = arg;

  btrace_disable (tp);
}
168
169 /* Enable automatic tracing of new threads. */
170
static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* From now on, every newly created thread gets branch tracing enabled
     automatically (errors reported as warnings).  The observer handle is
     kept so record_btrace_auto_disable can detach it again.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
179
180 /* Disable automatic tracing of new threads. */
181
182 static void
183 record_btrace_auto_disable (void)
184 {
185 /* The observer may have been detached, already. */
186 if (record_btrace_thread_observer == NULL)
187 return;
188
189 DEBUG ("detach thread observer");
190
191 observer_detach_new_thread (record_btrace_thread_observer);
192 record_btrace_thread_observer = NULL;
193 }
194
195 /* The record-btrace async event handler function. */
196
static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  /* Forward the async event to the generic inferior event handler; DATA is
     unused (the handler was registered with NULL).  */
  inferior_event_handler (INF_REG_EVENT, NULL);
}
202
203 /* The to_open method of target record-btrace. */
204
static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Build a cleanup chain that disables tracing for every thread we managed
     to enable, so that an error part-way through leaves no thread traced.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    /* No ARGS (or empty ARGS) means "all threads"; otherwise ARGS is a
       thread-number list.  */
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  /* Success - keep tracing enabled on all selected threads.  */
  discard_cleanups (disable_chain);
}
245
246 /* The to_stop_recording method of target record-btrace. */
247
248 static void
249 record_btrace_stop_recording (struct target_ops *self)
250 {
251 struct thread_info *tp;
252
253 DEBUG ("stop recording");
254
255 record_btrace_auto_disable ();
256
257 ALL_NON_EXITED_THREADS (tp)
258 if (tp->btrace.target != NULL)
259 btrace_disable (tp);
260 }
261
262 /* The to_close method of target record-btrace. */
263
264 static void
265 record_btrace_close (struct target_ops *self)
266 {
267 struct thread_info *tp;
268
269 if (record_btrace_async_inferior_event_handler != NULL)
270 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
271
272 /* Make sure automatic recording gets disabled even if we did not stop
273 recording before closing the record-btrace target. */
274 record_btrace_auto_disable ();
275
276 /* We should have already stopped recording.
277 Tear down btrace in case we have not. */
278 ALL_NON_EXITED_THREADS (tp)
279 btrace_teardown (tp);
280 }
281
282 /* The to_async method of target record-btrace. */
283
284 static void
285 record_btrace_async (struct target_ops *ops, int enable)
286 {
287 if (enable)
288 mark_async_event_handler (record_btrace_async_inferior_event_handler);
289 else
290 clear_async_event_handler (record_btrace_async_inferior_event_handler);
291
292 ops->beneath->to_async (ops->beneath, enable);
293 }
294
295 /* Adjusts the size and returns a human readable size suffix. */
296
/* Scale *SIZE down to the largest binary unit that divides it exactly and
   return the matching suffix ("GB", "MB", "kB", or "" if none applies).  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
    {
      { 30, "GB" },
      { 20, "MB" },
      { 10, "kB" }
    };
  unsigned int i, sz;

  sz = *size;

  /* Check units from largest to smallest; a value whose low SHIFT bits are
     all zero is an exact multiple of that unit.  */
  for (i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    if ((sz & ((1u << units[i].shift) - 1)) == 0)
      {
	*size = sz >> units[i].shift;
	return units[i].suffix;
      }

  return "";
}
322
323 /* Print a BTS configuration. */
324
325 static void
326 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
327 {
328 const char *suffix;
329 unsigned int size;
330
331 size = conf->size;
332 if (size > 0)
333 {
334 suffix = record_btrace_adjust_size (&size);
335 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
336 }
337 }
338
339 /* Print an Intel(R) Processor Trace configuration. */
340
341 static void
342 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
343 {
344 const char *suffix;
345 unsigned int size;
346
347 size = conf->size;
348 if (size > 0)
349 {
350 suffix = record_btrace_adjust_size (&size);
351 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
352 }
353 }
354
355 /* Print a branch tracing configuration. */
356
357 static void
358 record_btrace_print_conf (const struct btrace_config *conf)
359 {
360 printf_unfiltered (_("Recording format: %s.\n"),
361 btrace_format_string (conf->format));
362
363 switch (conf->format)
364 {
365 case BTRACE_FORMAT_NONE:
366 return;
367
368 case BTRACE_FORMAT_BTS:
369 record_btrace_print_bts_conf (&conf->bts);
370 return;
371
372 case BTRACE_FORMAT_PT:
373 record_btrace_print_pt_conf (&conf->pt);
374 return;
375 }
376
377 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
378 }
379
380 /* The to_info_record method of target record-btrace. */
381
static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  /* Print the trace configuration first, if there is one.  */
  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  /* Bring the trace up to date before counting.  */
  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call is the total number of calls.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  /* When replaying, also report the current replay position.  */
  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
452
453 /* Print a decode error. */
454
static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  /* Default to an unknown hard error; the switches below refine both the
     message and the error/non-error classification per trace format.  */
  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative codes come straight from the Intel PT decoder
	     library; translate them via libipt.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  /* Non-error events (e.g. "disabled") are printed without the
     "decode error (N)" wrapper.  */
  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
524
525 /* Print an unsigned int. */
526
static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  /* ui-out has no native unsigned field printer; format VAL ourselves.  */
  ui_out_field_fmt (uiout, fld, "%u", val);
}
532
533 /* Disassemble a section of the recorded instruction trace. */
534
static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  /* Print every instruction in the half-open range [BEGIN; END).  */
  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  char prefix[4];

	  /* We may add a speculation prefix later.  We use the same space
	     that is used for the pc prefix.  */
	  if ((flags & DISASSEMBLY_OMIT_PC) == 0)
	    strncpy (prefix, pc_prefix (insn->pc), 3);
	  else
	    {
	      prefix[0] = ' ';
	      prefix[1] = ' ';
	      prefix[2] = ' ';
	    }
	  /* strncpy above does not guarantee termination; do it here.  */
	  prefix[3] = 0;

	  /* Print the instruction index.  */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Indicate speculative execution by a leading '?'.  */
	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    prefix[0] = '?';

	  /* Print the prefix; we tell gdb_disassembly below to omit it.  */
	  ui_out_field_fmt (uiout, "prefix", "%s", prefix);

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
			   1, insn->pc, insn->pc + 1);
	}
    }
}
602
603 /* The to_insn_history method of target record-btrace. */
604
static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* SIZE gives the number of instructions to print; its sign gives the
     direction (negative means backwards).  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* A previous history exists - continue browsing from where it
	 stopped, in the requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* COVERED == 0 means we hit the respective end of the trace.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next browsing request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
688
689 /* The to_insn_history_range method of target record-btrace. */
690
static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* Instruction numbers are unsigned int internally; narrow the ULONGEST
     arguments and reject values that do not round-trip.  */
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
741
742 /* The to_insn_history_from method of target record-btrace. */
743
744 static void
745 record_btrace_insn_history_from (struct target_ops *self,
746 ULONGEST from, int size, int flags)
747 {
748 ULONGEST begin, end, context;
749
750 context = abs (size);
751 if (context == 0)
752 error (_("Bad record instruction-history-size."));
753
754 if (size < 0)
755 {
756 end = from;
757
758 if (from < context)
759 begin = 0;
760 else
761 begin = from - context + 1;
762 }
763 else
764 {
765 begin = from;
766 end = from + context - 1;
767
768 /* Check for wrap-around. */
769 if (end < begin)
770 end = ULONGEST_MAX;
771 }
772
773 record_btrace_insn_history_range (self, begin, end, flags);
774 }
775
776 /* Print the instruction number range for a function call history line. */
777
static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  /* The caller filters out gaps, so BFUN must contain instructions.  */
  gdb_assert (size > 0);

  /* Instruction numbers are global over the whole trace; BFUN's first
     instruction has number insn_offset.  Both bounds are inclusive.  */
  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
794
795 /* Compute the lowest and highest source line for the instructions in BFUN
796 and return them in PBEGIN and PEND.
797 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
798 result from inlining or macro expansion. */
799
static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  /* Start with an empty (inverted) range; if no line qualifies, the caller
     sees *PEND < *PBEGIN and knows there is no line info.  */
  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      /* Skip instructions whose line belongs to another symtab (e.g. from
	 inlining or macro expansion) or that have no line at all.  */
      sal = find_pc_line (insn->pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
835
836 /* Print the source line information for a function call history line. */
837
static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  /* Without a symbol we have no source information to print.  */
  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  /* An inverted range means no usable line info; print the file only.  */
  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Print the upper bound only if the range spans more than one line.  */
  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
865
866 /* Get the name of a branch trace function. */
867
868 static const char *
869 btrace_get_bfun_name (const struct btrace_function *bfun)
870 {
871 struct minimal_symbol *msym;
872 struct symbol *sym;
873
874 if (bfun == NULL)
875 return "??";
876
877 msym = bfun->msym;
878 sym = bfun->sym;
879
880 if (sym != NULL)
881 return SYMBOL_PRINT_NAME (sym);
882 else if (msym != NULL)
883 return MSYMBOL_PRINT_NAME (msym);
884 else
885 return "??";
886 }
887
888 /* Disassemble a section of the recorded function trace. */
889
static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  /* Print one line per function call in the half-open range [BEGIN; END).  */
  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent by the call depth; btinfo->level normalizes the
	     per-function level so the shallowest call starts at zero.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      /* Print the function name, preferring the full symbol.  */
      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
961
962 /* The to_call_history method of target record-btrace. */
963
static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): the tuple id says "insn history" although this is the
     call-history command - looks copied from the insn variant.  It is
     user-visible MI output, so confirm before changing.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  /* SIZE gives the number of calls to print; its sign gives the
     direction (negative means backwards).  */
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context- covered);
	}
    }
  else
    {
      /* A previous history exists - continue browsing from where it
	 stopped, in the requested direction.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      /* COVERED == 0 means we hit the respective end of the trace.  */
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next browsing request.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1050
1051 /* The to_call_history_range method of target record-btrace. */
1052
static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  /* Call numbers are unsigned int internally; narrow the ULONGEST
     arguments and reject values that do not round-trip.  */
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1103
1104 /* The to_call_history_from method of target record-btrace. */
1105
1106 static void
1107 record_btrace_call_history_from (struct target_ops *self,
1108 ULONGEST from, int size, int flags)
1109 {
1110 ULONGEST begin, end, context;
1111
1112 context = abs (size);
1113 if (context == 0)
1114 error (_("Bad record function-call-history-size."));
1115
1116 if (size < 0)
1117 {
1118 end = from;
1119
1120 if (from < context)
1121 begin = 0;
1122 else
1123 begin = from - context + 1;
1124 }
1125 else
1126 {
1127 begin = from;
1128 end = from + context - 1;
1129
1130 /* Check for wrap-around. */
1131 if (end < begin)
1132 end = ULONGEST_MAX;
1133 }
1134
1135 record_btrace_call_history_range (self, begin, end, flags);
1136 }
1137
1138 /* The to_record_is_replaying method of target record-btrace. */
1139
1140 static int
1141 record_btrace_is_replaying (struct target_ops *self)
1142 {
1143 struct thread_info *tp;
1144
1145 ALL_NON_EXITED_THREADS (tp)
1146 if (btrace_is_replaying (tp))
1147 return 1;
1148
1149 return 0;
1150 }
1151
1152 /* The to_xfer_partial method of target record-btrace. */
1153
1154 static enum target_xfer_status
1155 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1156 const char *annex, gdb_byte *readbuf,
1157 const gdb_byte *writebuf, ULONGEST offset,
1158 ULONGEST len, ULONGEST *xfered_len)
1159 {
1160 struct target_ops *t;
1161
1162 /* Filter out requests that don't make sense during replay. */
1163 if (replay_memory_access == replay_memory_access_read_only
1164 && !record_btrace_generating_corefile
1165 && record_btrace_is_replaying (ops))
1166 {
1167 switch (object)
1168 {
1169 case TARGET_OBJECT_MEMORY:
1170 {
1171 struct target_section *section;
1172
1173 /* We do not allow writing memory in general. */
1174 if (writebuf != NULL)
1175 {
1176 *xfered_len = len;
1177 return TARGET_XFER_UNAVAILABLE;
1178 }
1179
1180 /* We allow reading readonly memory. */
1181 section = target_section_by_addr (ops, offset);
1182 if (section != NULL)
1183 {
1184 /* Check if the section we found is readonly. */
1185 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1186 section->the_bfd_section)
1187 & SEC_READONLY) != 0)
1188 {
1189 /* Truncate the request to fit into this section. */
1190 len = min (len, section->endaddr - offset);
1191 break;
1192 }
1193 }
1194
1195 *xfered_len = len;
1196 return TARGET_XFER_UNAVAILABLE;
1197 }
1198 }
1199 }
1200
1201 /* Forward the request. */
1202 ops = ops->beneath;
1203 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1204 offset, len, xfered_len);
1205 }
1206
1207 /* The to_insert_breakpoint method of target record-btrace. */
1208
static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  /* Initialize RET in case the call below throws before assigning.  */
  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1237
/* The to_remove_breakpoint method of target record-btrace.

   Mirror of record_btrace_insert_breakpoint: allow memory writes for
   the duration of the call, restoring the old access mode on both the
   normal and the error path.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1268
1269 /* The to_fetch_registers method of target record-btrace. */
1270
1271 static void
1272 record_btrace_fetch_registers (struct target_ops *ops,
1273 struct regcache *regcache, int regno)
1274 {
1275 struct btrace_insn_iterator *replay;
1276 struct thread_info *tp;
1277
1278 tp = find_thread_ptid (inferior_ptid);
1279 gdb_assert (tp != NULL);
1280
1281 replay = tp->btrace.replay;
1282 if (replay != NULL && !record_btrace_generating_corefile)
1283 {
1284 const struct btrace_insn *insn;
1285 struct gdbarch *gdbarch;
1286 int pcreg;
1287
1288 gdbarch = get_regcache_arch (regcache);
1289 pcreg = gdbarch_pc_regnum (gdbarch);
1290 if (pcreg < 0)
1291 return;
1292
1293 /* We can only provide the PC register. */
1294 if (regno >= 0 && regno != pcreg)
1295 return;
1296
1297 insn = btrace_insn_get (replay);
1298 gdb_assert (insn != NULL);
1299
1300 regcache_raw_supply (regcache, regno, &insn->pc);
1301 }
1302 else
1303 {
1304 struct target_ops *t = ops->beneath;
1305
1306 t->to_fetch_registers (t, regcache, regno);
1307 }
1308 }
1309
1310 /* The to_store_registers method of target record-btrace. */
1311
1312 static void
1313 record_btrace_store_registers (struct target_ops *ops,
1314 struct regcache *regcache, int regno)
1315 {
1316 struct target_ops *t;
1317
1318 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1319 error (_("This record target does not allow writing registers."));
1320
1321 gdb_assert (may_write_registers != 0);
1322
1323 t = ops->beneath;
1324 t->to_store_registers (t, regcache, regno);
1325 }
1326
1327 /* The to_prepare_to_store method of target record-btrace. */
1328
1329 static void
1330 record_btrace_prepare_to_store (struct target_ops *ops,
1331 struct regcache *regcache)
1332 {
1333 struct target_ops *t;
1334
1335 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1336 return;
1337
1338 t = ops->beneath;
1339 t->to_prepare_to_store (t, regcache);
1340 }
1341
/* The branch trace frame cache.

   One entry is created per frame unwound by the btrace unwinder (see
   bfcache_new) and removed again when the frame is deallocated (see
   record_btrace_frame_dealloc_cache).  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  This is the hash key; see bfcache_hash.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table, keyed by the frame pointer.  */

static htab_t bfcache;
1359
1360 /* hash_f for htab_create_alloc of bfcache. */
1361
1362 static hashval_t
1363 bfcache_hash (const void *arg)
1364 {
1365 const struct btrace_frame_cache *cache = arg;
1366
1367 return htab_hash_pointer (cache->frame);
1368 }
1369
1370 /* eq_f for htab_create_alloc of bfcache. */
1371
1372 static int
1373 bfcache_eq (const void *arg1, const void *arg2)
1374 {
1375 const struct btrace_frame_cache *cache1 = arg1;
1376 const struct btrace_frame_cache *cache2 = arg2;
1377
1378 return cache1->frame == cache2->frame;
1379 }
1380
/* Create a new btrace frame cache for FRAME and register it in BFCACHE.

   The entry is allocated on the frame obstack, so it is freed together
   with the frame; record_btrace_frame_dealloc_cache only removes the
   hash table entry.  FRAME must not already have a cache entry.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  /* Assert that FRAME was not already registered.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1398
1399 /* Extract the branch trace function from a branch trace frame. */
1400
1401 static const struct btrace_function *
1402 btrace_get_frame_function (struct frame_info *frame)
1403 {
1404 const struct btrace_frame_cache *cache;
1405 const struct btrace_function *bfun;
1406 struct btrace_frame_cache pattern;
1407 void **slot;
1408
1409 pattern.frame = frame;
1410
1411 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1412 if (slot == NULL)
1413 return NULL;
1414
1415 cache = *slot;
1416 return cache->bfun;
1417 }
1418
1419 /* Implement stop_reason method for record_btrace_frame_unwind. */
1420
1421 static enum unwind_stop_reason
1422 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1423 void **this_cache)
1424 {
1425 const struct btrace_frame_cache *cache;
1426 const struct btrace_function *bfun;
1427
1428 cache = *this_cache;
1429 bfun = cache->bfun;
1430 gdb_assert (bfun != NULL);
1431
1432 if (bfun->up == NULL)
1433 return UNWIND_UNAVAILABLE;
1434
1435 return UNWIND_NO_REASON;
1436 }
1437
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an unavailable-stack frame id from the function's start address
   and the number of the function's first btrace segment, so that all
   segments of the same function instance yield the same id.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  /* The stack is not available during replay; mark it as such.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1466
/* Implement prev_register method for record_btrace_frame_unwind.

   Branch trace records neither memory nor register contents, so the only
   register that can be reconstructed for the previous frame is the PC,
   derived from the caller's btrace function segment.  Throws
   NOT_AVAILABLE_ERROR for any other register or when there is no
   caller segment.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The caller link was created by a return; take the PC of the
	 first instruction in the caller segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Otherwise take the last instruction of the caller segment and
	 step past it to get the return address.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1515
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the frame when a btrace function segment can be determined for
   it: the current replay position for the innermost frame, or the caller
   segment of the next (inner) btrace frame otherwise.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Otherwise THIS_FRAME is the caller of NEXT's segment - unless
	 that segment links up via a tail call, which is handled by
	 record_btrace_tailcall_frame_sniffer instead.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1565
/* Implement sniffer method for record_btrace_tailcall_frame_unwind.

   Claims the frame only when the next (inner) frame's btrace segment
   links to its caller via a tail call - the case the normal sniffer
   explicitly leaves alone.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  /* Tail-call frames are never innermost; we need a callee frame.  */
  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1603
/* Implement dealloc_cache method for record_btrace_frame_unwind.

   Remove the frame's entry from the BFCACHE hash table.  The entry
   itself is obstack-allocated and is freed together with the frame
   (see bfcache_new).  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry must have been registered by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1617
/* btrace recording does not store previous memory content, nor the stack
   frame contents.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Like record_btrace_frame_unwind, but for frames reached through a
   tail call (see record_btrace_tailcall_frame_sniffer).  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1645
/* Implement the to_get_unwinder method.  Returns the btrace frame
   unwinder used while replaying.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}
1653
/* Implement the to_get_tailcall_unwinder method.  Returns the btrace
   tail-call frame unwinder used while replaying.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}
1661
1662 /* Return a human-readable string for FLAG. */
1663
1664 static const char *
1665 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1666 {
1667 switch (flag)
1668 {
1669 case BTHR_STEP:
1670 return "step";
1671
1672 case BTHR_RSTEP:
1673 return "reverse-step";
1674
1675 case BTHR_CONT:
1676 return "cont";
1677
1678 case BTHR_RCONT:
1679 return "reverse-cont";
1680
1681 case BTHR_STOP:
1682 return "stop";
1683 }
1684
1685 return "<invalid>";
1686 }
1687
/* Indicate that TP should be resumed according to FLAG.

   Only records the request in TP's btrace flags; the actual stepping
   happens later in record_btrace_wait.  Throws if TP already has a move
   request pending.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding stop request.  */
  btinfo->flags &= ~BTHR_STOP;
  btinfo->flags |= flag;
}
1711
1712 /* Find the thread to resume given a PTID. */
1713
1714 static struct thread_info *
1715 record_btrace_find_resume_thread (ptid_t ptid)
1716 {
1717 struct thread_info *tp;
1718
1719 /* When asked to resume everything, we pick the current thread. */
1720 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1721 ptid = inferior_ptid;
1722
1723 return find_thread_ptid (ptid);
1724 }
1725
/* Start replaying a thread.

   Positions TP's replay iterator at the end of its branch trace (the
   current instruction), skipping trailing gaps.  Returns the iterator,
   or NULL if TP has no trace.  On error, all replay state is rolled
   back and the exception is re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (tp->ptid, executing);

      /* Undo any partially-installed replay state before re-throwing.  */
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  return replay;
}
1824
1825 /* Stop replaying a thread. */
1826
1827 static void
1828 record_btrace_stop_replaying (struct thread_info *tp)
1829 {
1830 struct btrace_thread_info *btinfo;
1831
1832 btinfo = &tp->btrace;
1833
1834 xfree (btinfo->replay);
1835 btinfo->replay = NULL;
1836
1837 /* Make sure we're not leaving any stale registers. */
1838 registers_changed_ptid (tp->ptid);
1839 }
1840
/* The to_resume method of target record-btrace.

   When not replaying and moving forward, forwards the request to the
   target beneath.  Otherwise only records a move request on the thread;
   the actual stepping happens in record_btrace_wait.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
	 execution_direction == EXEC_REVERSE ? "reverse-" : "",
	 step ? "step" : "cont");

  /* Store the execution direction of the last resume.  */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */

  /* Async support.  */
  if (target_can_async_p ())
    {
      target_async (1);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1896
1897 /* Cancel resuming TP. */
1898
1899 static void
1900 record_btrace_cancel_resume (struct thread_info *tp)
1901 {
1902 enum btrace_thread_flag flags;
1903
1904 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
1905 if (flags == 0)
1906 return;
1907
1908 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
1909 target_pid_to_str (tp->ptid), flags,
1910 btrace_thread_flag_to_str (flags));
1911
1912 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
1913 }
1914
1915 /* Find a thread to move. */
1916
1917 static struct thread_info *
1918 record_btrace_find_thread_to_move (ptid_t ptid)
1919 {
1920 struct thread_info *tp;
1921
1922 /* First check the parameter thread. */
1923 tp = find_thread_ptid (ptid);
1924 if (tp != NULL && (tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
1925 return tp;
1926
1927 /* Otherwise, find one other thread that has been resumed. */
1928 ALL_NON_EXITED_THREADS (tp)
1929 if ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0)
1930 return tp;
1931
1932 return NULL;
1933 }
1934
1935 /* Return a target_waitstatus indicating that we ran out of history. */
1936
1937 static struct target_waitstatus
1938 btrace_step_no_history (void)
1939 {
1940 struct target_waitstatus status;
1941
1942 status.kind = TARGET_WAITKIND_NO_HISTORY;
1943
1944 return status;
1945 }
1946
1947 /* Return a target_waitstatus indicating that a step finished. */
1948
1949 static struct target_waitstatus
1950 btrace_step_stopped (void)
1951 {
1952 struct target_waitstatus status;
1953
1954 status.kind = TARGET_WAITKIND_STOPPED;
1955 status.value.sig = GDB_SIGNAL_TRAP;
1956
1957 return status;
1958 }
1959
1960 /* Return a target_waitstatus indicating that a thread was stopped as
1961 requested. */
1962
1963 static struct target_waitstatus
1964 btrace_step_stopped_on_request (void)
1965 {
1966 struct target_waitstatus status;
1967
1968 status.kind = TARGET_WAITKIND_STOPPED;
1969 status.value.sig = GDB_SIGNAL_0;
1970
1971 return status;
1972 }
1973
1974 /* Return a target_waitstatus indicating a spurious stop. */
1975
1976 static struct target_waitstatus
1977 btrace_step_spurious (void)
1978 {
1979 struct target_waitstatus status;
1980
1981 status.kind = TARGET_WAITKIND_SPURIOUS;
1982
1983 return status;
1984 }
1985
1986 /* Clear the record histories. */
1987
1988 static void
1989 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1990 {
1991 xfree (btinfo->insn_history);
1992 xfree (btinfo->call_history);
1993
1994 btinfo->insn_history = NULL;
1995 btinfo->call_history = NULL;
1996 }
1997
/* Check whether TP's current replay position is at a breakpoint.

   Returns non-zero if it is, updating TP's btrace.stop_reason as a side
   effect.  Returns zero when TP is not replaying, no instruction is
   available at the replay position, or TP's inferior cannot be found.  */

static int
record_btrace_replay_at_breakpoint (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  const struct btrace_insn *insn;
  struct inferior *inf;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  if (replay == NULL)
    return 0;

  insn = btrace_insn_get (replay);
  if (insn == NULL)
    return 0;

  inf = find_inferior_ptid (tp->ptid);
  if (inf == NULL)
    return 0;

  return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
					     &btinfo->stop_reason);
}
2025
/* Step one instruction in forward direction.

   Advances TP's replay iterator by one instruction, skipping gaps.
   Returns NO_HISTORY when TP is not replaying or the end of the trace
   is hit; otherwise SPURIOUS.  Stops replaying as a side effect when
   the end of the trace is reached.  */

static struct target_waitstatus
record_btrace_single_step_forward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* We're done if we're not replaying.  */
  if (replay == NULL)
    return btrace_step_no_history ();

  /* Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_next (replay, 1);
      if (steps == 0)
	{
	  /* Could not advance: we fell off the end of the trace.  */
	  record_btrace_stop_replaying (tp);
	  return btrace_step_no_history ();
	}
    }
  while (btrace_insn_get (replay) == NULL);

  /* Determine the end of the instruction trace.  */
  btrace_insn_end (&end, btinfo);

  /* We stop replaying if we reached the end of the trace.  */
  if (btrace_insn_cmp (replay, &end) == 0)
    record_btrace_stop_replaying (tp);

  return btrace_step_spurious ();
}
2064
/* Step one instruction in backward direction.

   Starts replaying TP if it is not already doing so, then moves the
   replay iterator back one instruction, skipping gaps.  Returns
   NO_HISTORY at the beginning of the trace, SPURIOUS otherwise.  */

static struct target_waitstatus
record_btrace_single_step_backward (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Start replaying if we're not already doing so.  */
  if (replay == NULL)
    replay = record_btrace_start_replaying (tp);

  /* If we can't step any further, we reached the end of the history.
     Skip gaps during replay.  */
  do
    {
      unsigned int steps;

      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();
    }
  while (btrace_insn_get (replay) == NULL);

  return btrace_step_spurious ();
}
2094
/* Step a single thread.

   Consumes TP's pending move/stop request and executes it by moving the
   replay iterator: one instruction for STEP/RSTEP, or until an event
   (breakpoint, end of history) for CONT/RCONT.  Returns the resulting
   wait status; reported via record_btrace_wait.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct target_waitstatus status;
  enum btrace_thread_flag flags;

  btinfo = &tp->btrace;

  /* Consume the pending request so it is not executed twice.  */
  flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);

  DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
	 target_pid_to_str (tp->ptid), flags,
	 btrace_thread_flag_to_str (flags));

  /* We can't step without an execution history.  */
  if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
    return btrace_step_no_history ();

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STOP:
      return btrace_step_stopped_on_request ();

    case BTHR_STEP:
      status = record_btrace_single_step_forward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	return status;

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      status = record_btrace_single_step_backward (tp);
      if (status.kind != TARGET_WAITKIND_SPURIOUS)
	return status;

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* Step forward until we hit an event or run out of history; every
	 iteration that returns non-SPURIOUS ends the loop.  */
      for (;;)
	{
	  status = record_btrace_single_step_forward (tp);
	  if (status.kind != TARGET_WAITKIND_SPURIOUS)
	    return status;

	  if (btinfo->replay != NULL)
	    {
	      const struct btrace_insn *insn;

	      insn = btrace_insn_get (btinfo->replay);
	      gdb_assert (insn != NULL);

	      DEBUG ("stepping %d (%s) ... %s", tp->num,
		     target_pid_to_str (tp->ptid),
		     core_addr_to_string_nz (insn->pc));
	    }

	  if (record_btrace_replay_at_breakpoint (tp))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Same as BTHR_CONT, but stepping backwards.  Backward stepping
	 always stays within the trace, so a replay position exists.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  status = record_btrace_single_step_backward (tp);
	  if (status.kind != TARGET_WAITKIND_SPURIOUS)
	    return status;

	  gdb_assert (btinfo->replay != NULL);

	  insn = btrace_insn_get (btinfo->replay);
	  gdb_assert (insn != NULL);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (record_btrace_replay_at_breakpoint (tp))
	    return btrace_step_stopped ();
	}
    }
}
2185
/* The to_wait method of target record-btrace.

   When not replaying and moving forward, forwards the request to the
   target beneath.  Otherwise executes the pending move request of one
   thread (see record_btrace_step_thread) and reports its status.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      /* No pending request anywhere; nothing to report.  */
      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!target_is_non_stop_p ())
    ALL_NON_EXITED_THREADS (other)
      record_btrace_cancel_resume (other);

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
2229
2230 /* The to_stop method of target record-btrace. */
2231
2232 static void
2233 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2234 {
2235 DEBUG ("stop %s", target_pid_to_str (ptid));
2236
2237 /* As long as we're not replaying, just forward the request. */
2238 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
2239 {
2240 ops = ops->beneath;
2241 ops->to_stop (ops, ptid);
2242 }
2243 else
2244 {
2245 struct thread_info *tp;
2246
2247 ALL_NON_EXITED_THREADS (tp)
2248 if (ptid_match (tp->ptid, ptid))
2249 {
2250 tp->btrace.flags &= ~BTHR_MOVE;
2251 tp->btrace.flags |= BTHR_STOP;
2252 }
2253 }
2254 }
2255
/* The to_can_execute_reverse method of target record-btrace.
   Reverse execution is always supported by replaying the trace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
2263
2264 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2265
2266 static int
2267 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2268 {
2269 if (record_btrace_is_replaying (ops))
2270 {
2271 struct thread_info *tp = inferior_thread ();
2272
2273 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2274 }
2275
2276 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2277 }
2278
2279 /* The to_supports_stopped_by_sw_breakpoint method of target
2280 record-btrace. */
2281
2282 static int
2283 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2284 {
2285 if (record_btrace_is_replaying (ops))
2286 return 1;
2287
2288 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2289 }
2290
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    {
      struct thread_info *tp = inferior_thread ();

      /* During replay, report the stop reason recorded for this
	 thread.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2305
2306 /* The to_supports_stopped_by_hw_breakpoint method of target
2307 record-btrace. */
2308
2309 static int
2310 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2311 {
2312 if (record_btrace_is_replaying (ops))
2313 return 1;
2314
2315 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2316 }
2317
2318 /* The to_update_thread_list method of target record-btrace. */
2319
2320 static void
2321 record_btrace_update_thread_list (struct target_ops *ops)
2322 {
2323 /* We don't add or remove threads during replay. */
2324 if (record_btrace_is_replaying (ops))
2325 return;
2326
2327 /* Forward the request. */
2328 ops = ops->beneath;
2329 ops->to_update_thread_list (ops);
2330 }
2331
2332 /* The to_thread_alive method of target record-btrace. */
2333
2334 static int
2335 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2336 {
2337 /* We don't add or remove threads during replay. */
2338 if (record_btrace_is_replaying (ops))
2339 return find_thread_ptid (ptid) != NULL;
2340
2341 /* Forward the request. */
2342 ops = ops->beneath;
2343 return ops->to_thread_alive (ops, ptid);
2344 }
2345
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.

   Otherwise, starts replaying if necessary and moves the replay position
   to *IT, invalidating cached registers and record histories.  Finally
   reports the new stop location to the user.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2376
2377 /* The to_goto_record_begin method of target record-btrace. */
2378
2379 static void
2380 record_btrace_goto_begin (struct target_ops *self)
2381 {
2382 struct thread_info *tp;
2383 struct btrace_insn_iterator begin;
2384
2385 tp = require_btrace_thread ();
2386
2387 btrace_insn_begin (&begin, &tp->btrace);
2388 record_btrace_set_replay (tp, &begin);
2389 }
2390
2391 /* The to_goto_record_end method of target record-btrace. */
2392
2393 static void
2394 record_btrace_goto_end (struct target_ops *ops)
2395 {
2396 struct thread_info *tp;
2397
2398 tp = require_btrace_thread ();
2399
2400 record_btrace_set_replay (tp, NULL);
2401 }
2402
2403 /* The to_goto_record method of target record-btrace. */
2404
2405 static void
2406 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2407 {
2408 struct thread_info *tp;
2409 struct btrace_insn_iterator it;
2410 unsigned int number;
2411 int found;
2412
2413 number = insn;
2414
2415 /* Check for wrap-arounds. */
2416 if (number != insn)
2417 error (_("Instruction number out of range."));
2418
2419 tp = require_btrace_thread ();
2420
2421 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2422 if (found == 0)
2423 error (_("No such instruction."));
2424
2425 record_btrace_set_replay (tp, &it);
2426 }
2427
2428 /* The to_execution_direction target method. */
2429
2430 static enum exec_direction_kind
2431 record_btrace_execution_direction (struct target_ops *self)
2432 {
2433 return record_btrace_resume_exec_dir;
2434 }
2435
2436 /* The to_prepare_to_generate_core target method. */
2437
2438 static void
2439 record_btrace_prepare_to_generate_core (struct target_ops *self)
2440 {
2441 record_btrace_generating_corefile = 1;
2442 }
2443
2444 /* The to_done_generating_core target method. */
2445
2446 static void
2447 record_btrace_done_generating_core (struct target_ops *self)
2448 {
2449 record_btrace_generating_corefile = 0;
2450 }
2451
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;

  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";

  /* Target life cycle.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;

  /* Recording control and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;

  /* Memory, breakpoints, and registers.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;

  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_stop = record_btrace_stop;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;

  /* Replay navigation.  */
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;

  /* Stop-reason reporting.  */
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;

  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;

  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2508
2509 /* Start recording in BTS format. */
2510
2511 static void
2512 cmd_record_btrace_bts_start (char *args, int from_tty)
2513 {
2514 if (args != NULL && *args != 0)
2515 error (_("Invalid argument."));
2516
2517 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2518
2519 TRY
2520 {
2521 execute_command ("target record-btrace", from_tty);
2522 }
2523 CATCH (exception, RETURN_MASK_ALL)
2524 {
2525 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2526 throw_exception (exception);
2527 }
2528 END_CATCH
2529 }
2530
2531 /* Start recording Intel(R) Processor Trace. */
2532
2533 static void
2534 cmd_record_btrace_pt_start (char *args, int from_tty)
2535 {
2536 if (args != NULL && *args != 0)
2537 error (_("Invalid argument."));
2538
2539 record_btrace_conf.format = BTRACE_FORMAT_PT;
2540
2541 TRY
2542 {
2543 execute_command ("target record-btrace", from_tty);
2544 }
2545 CATCH (exception, RETURN_MASK_ALL)
2546 {
2547 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2548 throw_exception (exception);
2549 }
2550 END_CATCH
2551 }
2552
2553 /* Alias for "target record". */
2554
2555 static void
2556 cmd_record_btrace_start (char *args, int from_tty)
2557 {
2558 if (args != NULL && *args != 0)
2559 error (_("Invalid argument."));
2560
2561 record_btrace_conf.format = BTRACE_FORMAT_PT;
2562
2563 TRY
2564 {
2565 execute_command ("target record-btrace", from_tty);
2566 }
2567 CATCH (exception, RETURN_MASK_ALL)
2568 {
2569 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2570
2571 TRY
2572 {
2573 execute_command ("target record-btrace", from_tty);
2574 }
2575 CATCH (exception, RETURN_MASK_ALL)
2576 {
2577 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2578 throw_exception (exception);
2579 }
2580 END_CATCH
2581 }
2582 END_CATCH
2583 }
2584
2585 /* The "set record btrace" command. */
2586
2587 static void
2588 cmd_set_record_btrace (char *args, int from_tty)
2589 {
2590 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2591 }
2592
2593 /* The "show record btrace" command. */
2594
2595 static void
2596 cmd_show_record_btrace (char *args, int from_tty)
2597 {
2598 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2599 }
2600
2601 /* The "show record btrace replay-memory-access" command. */
2602
2603 static void
2604 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2605 struct cmd_list_element *c, const char *value)
2606 {
2607 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2608 replay_memory_access);
2609 }
2610
2611 /* The "set record btrace bts" command. */
2612
2613 static void
2614 cmd_set_record_btrace_bts (char *args, int from_tty)
2615 {
2616 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2617 "by an appropriate subcommand.\n"));
2618 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2619 all_commands, gdb_stdout);
2620 }
2621
2622 /* The "show record btrace bts" command. */
2623
2624 static void
2625 cmd_show_record_btrace_bts (char *args, int from_tty)
2626 {
2627 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2628 }
2629
2630 /* The "set record btrace pt" command. */
2631
2632 static void
2633 cmd_set_record_btrace_pt (char *args, int from_tty)
2634 {
2635 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2636 "by an appropriate subcommand.\n"));
2637 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2638 all_commands, gdb_stdout);
2639 }
2640
2641 /* The "show record btrace pt" command. */
2642
2643 static void
2644 cmd_show_record_btrace_pt (char *args, int from_tty)
2645 {
2646 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2647 }
2648
/* The "record bts buffer-size" show value function.  Prints VALUE, the
   current buffer-size setting, to FILE.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
2659
/* The "record pt buffer-size" show value function.  Prints VALUE, the
   current buffer-size setting, to FILE.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
2670
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" (and its "record b" alias) starts recording in the
     default format; "record btrace bts" / "record btrace pt" select a
     specific trace format.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefix commands.  These must be registered
     before the subcommands below that hang off their command lists.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" prefix commands and the BTS
     buffer-size setting.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" prefix commands and the PT
     buffer-size setting.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Cache for btrace function call segments, keyed for frame unwinding.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default trace buffer sizes.  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}