/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2014 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "exceptions.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Temporarily allow memory accesses.  */
static int record_btrace_allow_memory_access;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
        fprintf_unfiltered (gdb_stdlog,					\
                            "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)

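/* Editorial usage note: DEBUG expands to a guarded fprintf_unfiltered, so
   with "set debug record 1" a call such as DEBUG ("open") prints
   "[record-btrace] open" to gdb_stdlog.  The GNU ", ##args" extension
   swallows the comma when no variadic arguments are given.  */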

/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

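  /* Editorial note: TRY_CATCH is the C-based exception-handling macro GDB
     used at this time; it runs the statement that follows and records any
     thrown error in ERROR rather than propagating it.  */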
  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

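  /* Editorial note: ARGS may list thread numbers in GDB's usual number-list
     syntax, e.g. "1 3-5" (values hypothetical); tracing is then enabled only
     for the listed threads.  An empty ARGS enables tracing for all
     threads.  */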
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
        btrace_enable (tp);

        make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  observer_notify_record_changed (current_inferior (), 1);

  discard_cleanups (disable_chain);
}

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
                       "%d (%s).\n"), insns, calls, tp->num,
                     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
                       btrace_insn_number (btinfo->replay));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
                     const struct btrace_insn_iterator *begin,
                     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
         btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
         See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        begin = *replay;
      else
        btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_insn_next (&end, 1);
          covered += btrace_insn_prev (&begin, context - covered);
          covered += btrace_insn_next (&end, context - covered);
        }
      else
        {
          covered = btrace_insn_next (&end, context);
          covered += btrace_insn_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_insn_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_insn_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_insn_history_from method of target record-btrace.  */
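/* As a worked example (editorial, values hypothetical): FROM = 10 with
   SIZE = -3 requests the inclusive range [8; 10], while FROM = 10 with
   SIZE = 3 requests [10; 12].  On unsigned wrap-around, END is clamped to
   ULONGEST_MAX below.  */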

static void
record_btrace_insn_history_from (ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (begin, end, flags);
}

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
                                const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
                              const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
                       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return SYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
                     const struct btrace_thread_info *btinfo,
                     const struct btrace_call_iterator *begin,
                     const struct btrace_call_iterator *end,
                     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
         btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
        {
          int level = bfun->level + btinfo->level, i;

          for (i = 0; i < level; ++i)
            ui_out_text (uiout, "  ");
        }

      if (sym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
        ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
        ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
        {
          ui_out_text (uiout, _("\tinst "));
          btrace_call_history_insn_range (uiout, bfun);
        }

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
        {
          ui_out_text (uiout, _("\tat "));
          btrace_call_history_src_line (uiout, bfun);
        }

      ui_out_text (uiout, "\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
         start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
        {
          begin.function = replay->function;
          begin.btinfo = btinfo;
        }
      else
        btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
         expand in the other direction, as well, to fill up any remaining
         context.  */
      end = begin;
      if (size < 0)
        {
          /* We want the current position covered, as well.  */
          covered = btrace_call_next (&end, 1);
          covered += btrace_call_prev (&begin, context - covered);
          covered += btrace_call_next (&end, context - covered);
        }
      else
        {
          covered = btrace_call_next (&end, context);
          covered += btrace_call_prev (&begin, context - covered);
        }
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
             btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
        {
          end = begin;
          covered = btrace_call_prev (&begin, context);
        }
      else
        {
          begin = end;
          covered = btrace_call_next (&end, context);
        }
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
        printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
        printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
                                                       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
        begin = 0;
      else
        begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
        end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (begin, end, flags);
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
                            const char *annex, gdb_byte *readbuf,
                            const gdb_byte *writebuf, ULONGEST offset,
                            ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
    {
      switch (object)
        {
        case TARGET_OBJECT_MEMORY:
          {
            struct target_section *section;

            /* We do not allow writing memory in general.  */
            if (writebuf != NULL)
              {
                *xfered_len = len;
                return TARGET_XFER_E_UNAVAILABLE;
              }

            /* We allow reading readonly memory.  */
            section = target_section_by_addr (ops, offset);
            if (section != NULL)
              {
                /* Check if the section we found is readonly.  */
                if ((bfd_get_section_flags (section->the_bfd_section->owner,
                                            section->the_bfd_section)
                     & SEC_READONLY) != 0)
                  {
                    /* Truncate the request to fit into this section.  */
                    len = min (len, section->endaddr - offset);
                    break;
                  }
              }

            *xfered_len = len;
            return TARGET_XFER_E_UNAVAILABLE;
          }
        }
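
      /* Editorial note: only TARGET_OBJECT_MEMORY is filtered above; requests
         for other object types fall through and are forwarded below, even
         while replaying.  */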
    }

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_xfer_partial != NULL)
      return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
                                   offset, len, xfered_len);

  *xfered_len = len;
  return TARGET_XFER_E_UNAVAILABLE;
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  int old, ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  record_btrace_allow_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
                                 struct gdbarch *gdbarch,
                                 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  int old, ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = record_btrace_allow_memory_access;
  record_btrace_allow_memory_access = 1;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  record_btrace_allow_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
        return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
        return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t;

      for (t = ops->beneath; t != NULL; t = t->beneath)
        if (t->to_fetch_registers != NULL)
          {
            t->to_fetch_registers (t, regcache, regno);
            break;
          }
    }
}

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
                               struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_store_registers != NULL)
      {
        t->to_store_registers (t, regcache, regno);
        return;
      }

  noprocess ();
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
                                struct regcache *regcache)
{
  struct target_ops *t;

  if (record_btrace_is_replaying (ops))
    return;

  for (t = ops->beneath; t != NULL; t = t->beneath)
    if (t->to_prepare_to_store != NULL)
      {
        t->to_prepare_to_store (t, regcache);
        return;
      }
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
                                        void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == NULL)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
                             struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
         btrace_get_bfun_name (cache->bfun),
         core_addr_to_string_nz (this_id->code_addr),
         core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
                                   void **this_cache,
                                   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
                 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
         btrace_get_bfun_name (bfun), bfun->level,
         core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
                             struct frame_info *this_frame,
                             void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
        bfun = replay->function;
    }
  else
    {
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
        bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
                                      struct frame_info *this_frame,
                                      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct frame_info *next;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  bfun = callee->up;
  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
         btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = find_thread_ptid (inferior_ptid);
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

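/* Implement the dealloc_cache method for record_btrace_frame_unwind.  */
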
static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory contents, nor the
   contents of the stack frames.  Any unwinding would return erroneous
   results as the stack contents no longer match the changed PC value
   restored from history.  Therefore this unwinder reports any possibly
   unwound registers as <unavailable>.  */

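/* Editorial note: the initializers below are positional for struct
   frame_unwind; the single NULL corresponds to the unwind_data field, which
   these unwinders do not use.  */
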
const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
                             enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}

/* Find the thread to resume given a PTID.  */

static struct thread_info *
record_btrace_find_resume_thread (ptid_t ptid)
{
  struct thread_info *tp;

  /* When asked to resume everything, we pick the current thread.  */
  if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
    ptid = inferior_ptid;

  return find_thread_ptid (ptid);
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
                                       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
                                             tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
         to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
        tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
        tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}

/* Stop replaying a thread.  */

static void
record_btrace_stop_replaying (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  xfree (btinfo->replay);
  btinfo->replay = NULL;

  /* Make sure we're not leaving any stale registers.  */
  registers_changed_ptid (tp->ptid);
}

/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
                      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
        if (ops->to_resume != NULL)
          return ops->to_resume (ops, ptid, step, signal);

      error (_("Cannot find target for stepping."));
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread so as not to confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */
}

/* Find a thread to move.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread.  */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed.  */
  ALL_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}

/* Return a target_waitstatus indicating that we ran out of history.  */

static struct target_waitstatus
btrace_step_no_history (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_NO_HISTORY;

  return status;
}

/* Return a target_waitstatus indicating that a step finished.  */

static struct target_waitstatus
btrace_step_stopped (void)
{
  struct target_waitstatus status;

  status.kind = TARGET_WAITKIND_STOPPED;
  status.value.sig = GDB_SIGNAL_TRAP;

  return status;
}

/* Clear the record histories.  */

static void
record_btrace_clear_histories (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
}

/* Step a single thread.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
        return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
        record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
        replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
        return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
        return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
        {
          const struct btrace_insn *insn;

          /* We are always able to step at least once.  */
          steps = btrace_insn_next (replay, 1);
          gdb_assert (steps == 1);

          /* We stop replaying if we reached the end of the trace.  */
          if (btrace_insn_cmp (replay, &end) == 0)
            {
              record_btrace_stop_replaying (tp);
              return btrace_step_no_history ();
            }

          insn = btrace_insn_get (replay);
          gdb_assert (insn);

          DEBUG ("stepping %d (%s) ... %s", tp->num,
                 target_pid_to_str (tp->ptid),
                 core_addr_to_string_nz (insn->pc));

          if (breakpoint_here_p (aspace, insn->pc))
            return btrace_step_stopped ();
        }

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
        replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      for (;;)
        {
          const struct btrace_insn *insn;

          /* If we can't step any further, we're done.  */
          steps = btrace_insn_prev (replay, 1);
          if (steps == 0)
            return btrace_step_no_history ();

          insn = btrace_insn_get (replay);
          gdb_assert (insn);

          DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
                 target_pid_to_str (tp->ptid),
                 core_addr_to_string_nz (insn->pc));

          if (breakpoint_here_p (aspace, insn->pc))
            return btrace_step_stopped ();
        }
    }
}

/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
                    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
        if (ops->to_wait != NULL)
          return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}

/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}

/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
                                   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
}

/* The to_find_new_threads method of target record-btrace.  */

static void
record_btrace_find_new_threads (struct target_ops *ops)
{
  /* Don't expect new threads if we're replaying.  */
  if (record_btrace_is_replaying (ops))
    return;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_find_new_threads != NULL)
      {
        ops->to_find_new_threads (ops);
        break;
      }
}

/* The to_thread_alive method of target record-btrace.  */

static int
record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
{
  /* We don't add or remove threads during replay.  */
  if (record_btrace_is_replaying (ops))
    return find_thread_ptid (ptid) != NULL;

  /* Forward the request.  */
  for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
    if (ops->to_thread_alive != NULL)
      return ops->to_thread_alive (ops, ptid);

  return 0;
}

/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
                          const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
        record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
        return;

      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}

/* The to_goto_record_begin method of target record-btrace.  */

static void
record_btrace_goto_begin (struct target_ops *self)
{
  struct thread_info *tp;
  struct btrace_insn_iterator begin;

  tp = require_btrace_thread ();

  btrace_insn_begin (&begin, &tp->btrace);
  record_btrace_set_replay (tp, &begin);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record_end method of target record-btrace.  */

static void
record_btrace_goto_end (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  record_btrace_set_replay (tp, NULL);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* The to_goto_record method of target record-btrace.  */

static void
record_btrace_goto (ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}

/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_create_inferior = find_default_create_inferior;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_frame_unwind;
  ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}

/* Alias for "target record-btrace".  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  execute_command ("target record-btrace", from_tty);
}
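
/* Editorial example session (illustrative):

     (gdb) record btrace
     (gdb) next
     (gdb) record instruction-history
     (gdb) record function-call-history
     (gdb) reverse-stepi
     (gdb) record goto end

   "record btrace" is the alias registered below and simply runs
   "target record-btrace".  */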

void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
           _("Start branch trace recording."),
           &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                               xcalloc, xfree);
}