record-btrace: add to_wait and to_resume target methods.
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "exceptions.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37
38 /* The target_ops of record-btrace. */
39 static struct target_ops record_btrace_ops;
40
41 /* A new-thread observer enabling branch tracing for each new thread. */
42 static struct observer *record_btrace_thread_observer;
43
44 /* When set, temporarily allow memory accesses during replay. */
45 static int record_btrace_allow_memory_access;
46
47 /* Print a record-btrace debug message. Use do ... while (0) to avoid
48 ambiguities when used in if statements. */
49
50 #define DEBUG(msg, args...) \
51 do \
52 { \
53 if (record_debug != 0) \
54 fprintf_unfiltered (gdb_stdlog, \
55 "[record-btrace] " msg "\n", ##args); \
56 } \
57 while (0)
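/* For example, DEBUG ("open") prints "[record-btrace] open" to gdb_stdlog,
   provided record debugging output has been enabled (presumably via
   "set debug record", which sets record_debug).  */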
58
59
60 /* Update the branch trace for the current thread and return a pointer to its
61 branch trace information struct.
62
63 Throws an error if there is no thread or no trace. This function never
64 returns NULL. */
65
66 static struct btrace_thread_info *
67 require_btrace (void)
68 {
69 struct thread_info *tp;
70 struct btrace_thread_info *btinfo;
71
72 DEBUG ("require");
73
74 tp = find_thread_ptid (inferior_ptid);
75 if (tp == NULL)
76 error (_("No thread."));
77
78 btrace_fetch (tp);
79
80 btinfo = &tp->btrace;
81
82 if (btinfo->begin == NULL)
83 error (_("No trace."));
84
85 return btinfo;
86 }
87
88 /* Enable branch tracing for one thread. Warn on errors. */
89
90 static void
91 record_btrace_enable_warn (struct thread_info *tp)
92 {
93 volatile struct gdb_exception error;
94
95 TRY_CATCH (error, RETURN_MASK_ERROR)
96 btrace_enable (tp);
97
98 if (error.message != NULL)
99 warning ("%s", error.message);
100 }
101
102 /* Callback function to disable branch tracing for one thread. */
103
104 static void
105 record_btrace_disable_callback (void *arg)
106 {
107 struct thread_info *tp;
108
109 tp = arg;
110
111 btrace_disable (tp);
112 }
113
114 /* Enable automatic tracing of new threads. */
115
116 static void
117 record_btrace_auto_enable (void)
118 {
119 DEBUG ("attach thread observer");
120
121 record_btrace_thread_observer
122 = observer_attach_new_thread (record_btrace_enable_warn);
123 }
124
125 /* Disable automatic tracing of new threads. */
126
127 static void
128 record_btrace_auto_disable (void)
129 {
130 /* The observer may have been detached already. */
131 if (record_btrace_thread_observer == NULL)
132 return;
133
134 DEBUG ("detach thread observer");
135
136 observer_detach_new_thread (record_btrace_thread_observer);
137 record_btrace_thread_observer = NULL;
138 }
139
140 /* The to_open method of target record-btrace. */
141
142 static void
143 record_btrace_open (char *args, int from_tty)
144 {
145 struct cleanup *disable_chain;
146 struct thread_info *tp;
147
148 DEBUG ("open");
149
150 record_preopen ();
151
152 if (!target_has_execution)
153 error (_("The program is not being run."));
154
155 if (!target_supports_btrace ())
156 error (_("Target does not support branch tracing."));
157
158 gdb_assert (record_btrace_thread_observer == NULL);
159
160 disable_chain = make_cleanup (null_cleanup, NULL);
161 ALL_THREADS (tp)
162 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
163 {
164 btrace_enable (tp);
165
166 make_cleanup (record_btrace_disable_callback, tp);
167 }
168
169 record_btrace_auto_enable ();
170
171 push_target (&record_btrace_ops);
172
173 observer_notify_record_changed (current_inferior (), 1);
174
175 discard_cleanups (disable_chain);
176 }
177
178 /* The to_stop_recording method of target record-btrace. */
179
180 static void
181 record_btrace_stop_recording (void)
182 {
183 struct thread_info *tp;
184
185 DEBUG ("stop recording");
186
187 record_btrace_auto_disable ();
188
189 ALL_THREADS (tp)
190 if (tp->btrace.target != NULL)
191 btrace_disable (tp);
192 }
193
194 /* The to_close method of target record-btrace. */
195
196 static void
197 record_btrace_close (void)
198 {
199 /* Make sure automatic recording gets disabled even if we did not stop
200 recording before closing the record-btrace target. */
201 record_btrace_auto_disable ();
202
203 /* We already stopped recording. */
204 }
205
206 /* The to_info_record method of target record-btrace. */
207
208 static void
209 record_btrace_info (void)
210 {
211 struct btrace_thread_info *btinfo;
212 struct thread_info *tp;
213 unsigned int insns, calls;
214
215 DEBUG ("info");
216
217 tp = find_thread_ptid (inferior_ptid);
218 if (tp == NULL)
219 error (_("No thread."));
220
221 btrace_fetch (tp);
222
223 insns = 0;
224 calls = 0;
225
226 btinfo = &tp->btrace;
227 if (btinfo->begin != NULL)
228 {
229 struct btrace_call_iterator call;
230 struct btrace_insn_iterator insn;
231
232 btrace_call_end (&call, btinfo);
233 btrace_call_prev (&call, 1);
234 calls = btrace_call_number (&call);
235
236 btrace_insn_end (&insn, btinfo);
237 btrace_insn_prev (&insn, 1);
238 insns = btrace_insn_number (&insn);
239 }
240
241 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
242 "%d (%s).\n"), insns, calls, tp->num,
243 target_pid_to_str (tp->ptid));
244
245 if (btrace_is_replaying (tp))
246 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
247 btrace_insn_number (btinfo->replay));
248 }
249
250 /* Print an unsigned int. */
251
252 static void
253 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
254 {
255 ui_out_field_fmt (uiout, fld, "%u", val);
256 }
257
258 /* Disassemble a section of the recorded instruction trace. */
259
260 static void
261 btrace_insn_history (struct ui_out *uiout,
262 const struct btrace_insn_iterator *begin,
263 const struct btrace_insn_iterator *end, int flags)
264 {
265 struct gdbarch *gdbarch;
266 struct btrace_insn_iterator it;
267
268 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
269 btrace_insn_number (end));
270
271 gdbarch = target_gdbarch ();
272
273 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
274 {
275 const struct btrace_insn *insn;
276
277 insn = btrace_insn_get (&it);
278
279 /* Print the instruction index. */
280 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
281 ui_out_text (uiout, "\t");
282
283 /* Disassembly with '/m' flag may not produce the expected result.
284 See PR gdb/11833. */
285 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
286 }
287 }
288
289 /* The to_insn_history method of target record-btrace. */
290
291 static void
292 record_btrace_insn_history (int size, int flags)
293 {
294 struct btrace_thread_info *btinfo;
295 struct btrace_insn_history *history;
296 struct btrace_insn_iterator begin, end;
297 struct cleanup *uiout_cleanup;
298 struct ui_out *uiout;
299 unsigned int context, covered;
300
301 uiout = current_uiout;
302 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
303 "insn history");
304 context = abs (size);
305 if (context == 0)
306 error (_("Bad record instruction-history-size."));
307
308 btinfo = require_btrace ();
309 history = btinfo->insn_history;
310 if (history == NULL)
311 {
312 struct btrace_insn_iterator *replay;
313
314 DEBUG ("insn-history (0x%x): %d", flags, size);
315
316 /* If we're replaying, we start at the replay position. Otherwise, we
317 start at the tail of the trace. */
318 replay = btinfo->replay;
319 if (replay != NULL)
320 begin = *replay;
321 else
322 btrace_insn_end (&begin, btinfo);
323
324 /* We start from here and expand in the requested direction. Then we
325 expand in the other direction, as well, to fill up any remaining
326 context. */
327 end = begin;
328 if (size < 0)
329 {
330 /* We want the current position covered, as well. */
331 covered = btrace_insn_next (&end, 1);
332 covered += btrace_insn_prev (&begin, context - covered);
333 covered += btrace_insn_next (&end, context - covered);
334 }
335 else
336 {
337 covered = btrace_insn_next (&end, context);
338 covered += btrace_insn_prev (&begin, context - covered);
339 }
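/* A worked example with illustrative values: for SIZE == -10, CONTEXT is 10.
   If we are replaying at instruction 100, BEGIN and END both start there;
   btrace_insn_next covers the current instruction, btrace_insn_prev then
   moves BEGIN back to instruction 91, and the final btrace_insn_next has
   nothing left to do.  The history shown is thus instructions 91 up to and
   including 100.  Near the start of the trace, BEGIN covers less and the
   final expansion of END makes up the difference.  */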
340 }
341 else
342 {
343 begin = history->begin;
344 end = history->end;
345
346 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
347 btrace_insn_number (&begin), btrace_insn_number (&end));
348
349 if (size < 0)
350 {
351 end = begin;
352 covered = btrace_insn_prev (&begin, context);
353 }
354 else
355 {
356 begin = end;
357 covered = btrace_insn_next (&end, context);
358 }
359 }
360
361 if (covered > 0)
362 btrace_insn_history (uiout, &begin, &end, flags);
363 else
364 {
365 if (size < 0)
366 printf_unfiltered (_("At the start of the branch trace record.\n"));
367 else
368 printf_unfiltered (_("At the end of the branch trace record.\n"));
369 }
370
371 btrace_set_insn_history (btinfo, &begin, &end);
372 do_cleanups (uiout_cleanup);
373 }
374
375 /* The to_insn_history_range method of target record-btrace. */
376
377 static void
378 record_btrace_insn_history_range (ULONGEST from, ULONGEST to, int flags)
379 {
380 struct btrace_thread_info *btinfo;
381 struct btrace_insn_history *history;
382 struct btrace_insn_iterator begin, end;
383 struct cleanup *uiout_cleanup;
384 struct ui_out *uiout;
385 unsigned int low, high;
386 int found;
387
388 uiout = current_uiout;
389 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
390 "insn history");
391 low = from;
392 high = to;
393
394 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
395
396 /* Check for wrap-arounds. */
397 if (low != from || high != to)
398 error (_("Bad range."));
399
400 if (high < low)
401 error (_("Bad range."));
402
403 btinfo = require_btrace ();
404
405 found = btrace_find_insn_by_number (&begin, btinfo, low);
406 if (found == 0)
407 error (_("Range out of bounds."));
408
409 found = btrace_find_insn_by_number (&end, btinfo, high);
410 if (found == 0)
411 {
412 /* Silently truncate the range. */
413 btrace_insn_end (&end, btinfo);
414 }
415 else
416 {
417 /* We want both begin and end to be inclusive. */
418 btrace_insn_next (&end, 1);
419 }
420
421 btrace_insn_history (uiout, &begin, &end, flags);
422 btrace_set_insn_history (btinfo, &begin, &end);
423
424 do_cleanups (uiout_cleanup);
425 }
426
427 /* The to_insn_history_from method of target record-btrace. */
428
429 static void
430 record_btrace_insn_history_from (ULONGEST from, int size, int flags)
431 {
432 ULONGEST begin, end, context;
433
434 context = abs (size);
435 if (context == 0)
436 error (_("Bad record instruction-history-size."));
437
438 if (size < 0)
439 {
440 end = from;
441
442 if (from < context)
443 begin = 0;
444 else
445 begin = from - context + 1;
446 }
447 else
448 {
449 begin = from;
450 end = from + context - 1;
451
452 /* Check for wrap-around. */
453 if (end < begin)
454 end = ULONGEST_MAX;
455 }
456
457 record_btrace_insn_history_range (begin, end, flags);
458 }
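/* For illustration (values assumed): with FROM == 10 and SIZE == -3, CONTEXT
   is 3, so END == 10 and BEGIN == 8, i.e. the three instructions 8..10 ending
   at FROM.  With SIZE == 3, BEGIN == 10 and END == 12.  Clipping to the
   recorded trace is left to record_btrace_insn_history_range.  */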
459
460 /* Print the instruction number range for a function call history line. */
461
462 static void
463 btrace_call_history_insn_range (struct ui_out *uiout,
464 const struct btrace_function *bfun)
465 {
466 unsigned int begin, end, size;
467
468 size = VEC_length (btrace_insn_s, bfun->insn);
469 gdb_assert (size > 0);
470
471 begin = bfun->insn_offset;
472 end = begin + size - 1;
473
474 ui_out_field_uint (uiout, "insn begin", begin);
475 ui_out_text (uiout, ",");
476 ui_out_field_uint (uiout, "insn end", end);
477 }
478
479 /* Print the source line information for a function call history line. */
480
481 static void
482 btrace_call_history_src_line (struct ui_out *uiout,
483 const struct btrace_function *bfun)
484 {
485 struct symbol *sym;
486 int begin, end;
487
488 sym = bfun->sym;
489 if (sym == NULL)
490 return;
491
492 ui_out_field_string (uiout, "file",
493 symtab_to_filename_for_display (sym->symtab));
494
495 begin = bfun->lbegin;
496 end = bfun->lend;
497
498 if (end < begin)
499 return;
500
501 ui_out_text (uiout, ":");
502 ui_out_field_int (uiout, "min line", begin);
503
504 if (end == begin)
505 return;
506
507 ui_out_text (uiout, ",");
508 ui_out_field_int (uiout, "max line", end);
509 }
510
511 /* Print a section of the recorded function call trace. */
512
513 static void
514 btrace_call_history (struct ui_out *uiout,
515 const struct btrace_thread_info *btinfo,
516 const struct btrace_call_iterator *begin,
517 const struct btrace_call_iterator *end,
518 enum record_print_flag flags)
519 {
520 struct btrace_call_iterator it;
521
522 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
523 btrace_call_number (end));
524
525 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
526 {
527 const struct btrace_function *bfun;
528 struct minimal_symbol *msym;
529 struct symbol *sym;
530
531 bfun = btrace_call_get (&it);
532 msym = bfun->msym;
533 sym = bfun->sym;
534
535 /* Print the function index. */
536 ui_out_field_uint (uiout, "index", bfun->number);
537 ui_out_text (uiout, "\t");
538
539 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
540 {
541 int level = bfun->level + btinfo->level, i;
542
543 for (i = 0; i < level; ++i)
544 ui_out_text (uiout, " ");
545 }
546
547 if (sym != NULL)
548 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
549 else if (msym != NULL)
550 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
551 else if (!ui_out_is_mi_like_p (uiout))
552 ui_out_field_string (uiout, "function", "??");
553
554 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
555 {
556 ui_out_text (uiout, _("\tinst "));
557 btrace_call_history_insn_range (uiout, bfun);
558 }
559
560 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
561 {
562 ui_out_text (uiout, _("\tat "));
563 btrace_call_history_src_line (uiout, bfun);
564 }
565
566 ui_out_text (uiout, "\n");
567 }
568 }
569
570 /* The to_call_history method of target record-btrace. */
571
572 static void
573 record_btrace_call_history (int size, int flags)
574 {
575 struct btrace_thread_info *btinfo;
576 struct btrace_call_history *history;
577 struct btrace_call_iterator begin, end;
578 struct cleanup *uiout_cleanup;
579 struct ui_out *uiout;
580 unsigned int context, covered;
581
582 uiout = current_uiout;
583 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
584 "insn history");
585 context = abs (size);
586 if (context == 0)
587 error (_("Bad record function-call-history-size."));
588
589 btinfo = require_btrace ();
590 history = btinfo->call_history;
591 if (history == NULL)
592 {
593 struct btrace_insn_iterator *replay;
594
595 DEBUG ("call-history (0x%x): %d", flags, size);
596
597 /* If we're replaying, we start at the replay position. Otherwise, we
598 start at the tail of the trace. */
599 replay = btinfo->replay;
600 if (replay != NULL)
601 {
602 begin.function = replay->function;
603 begin.btinfo = btinfo;
604 }
605 else
606 btrace_call_end (&begin, btinfo);
607
608 /* We start from here and expand in the requested direction. Then we
609 expand in the other direction, as well, to fill up any remaining
610 context. */
611 end = begin;
612 if (size < 0)
613 {
614 /* We want the current position covered, as well. */
615 covered = btrace_call_next (&end, 1);
616 covered += btrace_call_prev (&begin, context - covered);
617 covered += btrace_call_next (&end, context - covered);
618 }
619 else
620 {
621 covered = btrace_call_next (&end, context);
622 covered += btrace_call_prev (&begin, context - covered);
623 }
624 }
625 else
626 {
627 begin = history->begin;
628 end = history->end;
629
630 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
631 btrace_call_number (&begin), btrace_call_number (&end));
632
633 if (size < 0)
634 {
635 end = begin;
636 covered = btrace_call_prev (&begin, context);
637 }
638 else
639 {
640 begin = end;
641 covered = btrace_call_next (&end, context);
642 }
643 }
644
645 if (covered > 0)
646 btrace_call_history (uiout, btinfo, &begin, &end, flags);
647 else
648 {
649 if (size < 0)
650 printf_unfiltered (_("At the start of the branch trace record.\n"));
651 else
652 printf_unfiltered (_("At the end of the branch trace record.\n"));
653 }
654
655 btrace_set_call_history (btinfo, &begin, &end);
656 do_cleanups (uiout_cleanup);
657 }
658
659 /* The to_call_history_range method of target record-btrace. */
660
661 static void
662 record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
663 {
664 struct btrace_thread_info *btinfo;
665 struct btrace_call_history *history;
666 struct btrace_call_iterator begin, end;
667 struct cleanup *uiout_cleanup;
668 struct ui_out *uiout;
669 unsigned int low, high;
670 int found;
671
672 uiout = current_uiout;
673 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
674 "func history");
675 low = from;
676 high = to;
677
678 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
679
680 /* Check for wrap-arounds. */
681 if (low != from || high != to)
682 error (_("Bad range."));
683
684 if (high < low)
685 error (_("Bad range."));
686
687 btinfo = require_btrace ();
688
689 found = btrace_find_call_by_number (&begin, btinfo, low);
690 if (found == 0)
691 error (_("Range out of bounds."));
692
693 found = btrace_find_call_by_number (&end, btinfo, high);
694 if (found == 0)
695 {
696 /* Silently truncate the range. */
697 btrace_call_end (&end, btinfo);
698 }
699 else
700 {
701 /* We want both begin and end to be inclusive. */
702 btrace_call_next (&end, 1);
703 }
704
705 btrace_call_history (uiout, btinfo, &begin, &end, flags);
706 btrace_set_call_history (btinfo, &begin, &end);
707
708 do_cleanups (uiout_cleanup);
709 }
710
711 /* The to_call_history_from method of target record-btrace. */
712
713 static void
714 record_btrace_call_history_from (ULONGEST from, int size, int flags)
715 {
716 ULONGEST begin, end, context;
717
718 context = abs (size);
719 if (context == 0)
720 error (_("Bad record function-call-history-size."));
721
722 if (size < 0)
723 {
724 end = from;
725
726 if (from < context)
727 begin = 0;
728 else
729 begin = from - context + 1;
730 }
731 else
732 {
733 begin = from;
734 end = from + context - 1;
735
736 /* Check for wrap-around. */
737 if (end < begin)
738 end = ULONGEST_MAX;
739 }
740
741 record_btrace_call_history_range (begin, end, flags);
742 }
743
744 /* The to_record_is_replaying method of target record-btrace. */
745
746 static int
747 record_btrace_is_replaying (void)
748 {
749 struct thread_info *tp;
750
751 ALL_THREADS (tp)
752 if (btrace_is_replaying (tp))
753 return 1;
754
755 return 0;
756 }
757
758 /* The to_xfer_partial method of target record-btrace. */
759
760 static LONGEST
761 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
762 const char *annex, gdb_byte *readbuf,
763 const gdb_byte *writebuf, ULONGEST offset,
764 ULONGEST len)
765 {
768 /* Filter out requests that don't make sense during replay. */
769 if (!record_btrace_allow_memory_access && record_btrace_is_replaying ())
770 {
771 switch (object)
772 {
773 case TARGET_OBJECT_MEMORY:
774 {
775 struct target_section *section;
776
777 /* We do not allow writing memory in general. */
778 if (writebuf != NULL)
779 return TARGET_XFER_E_UNAVAILABLE;
780
781 /* We allow reading readonly memory. */
782 section = target_section_by_addr (ops, offset);
783 if (section != NULL)
784 {
785 /* Check if the section we found is readonly. */
786 if ((bfd_get_section_flags (section->the_bfd_section->owner,
787 section->the_bfd_section)
788 & SEC_READONLY) != 0)
789 {
790 /* Truncate the request to fit into this section. */
791 len = min (len, section->endaddr - offset);
792 break;
793 }
794 }
795
796 return TARGET_XFER_E_UNAVAILABLE;
797 }
798 }
799 }
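  /* To illustrate the filter above (describing the code as written): while
     replaying, any write request and any read from a non-read-only section
     yield TARGET_XFER_E_UNAVAILABLE, whereas a read that falls into a
     SEC_READONLY section is truncated to that section and forwarded to the
     target beneath.  */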
800
801 /* Forward the request. */
802 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
803 if (ops->to_xfer_partial != NULL)
804 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
805 offset, len);
806
807 return TARGET_XFER_E_UNAVAILABLE;
808 }
809
810 /* The to_insert_breakpoint method of target record-btrace. */
811
812 static int
813 record_btrace_insert_breakpoint (struct target_ops *ops,
814 struct gdbarch *gdbarch,
815 struct bp_target_info *bp_tgt)
816 {
817 volatile struct gdb_exception except;
818 int old, ret;
819
820 /* Inserting breakpoints requires accessing memory. Allow it for the
821 duration of this function. */
822 old = record_btrace_allow_memory_access;
823 record_btrace_allow_memory_access = 1;
824
825 ret = 0;
826 TRY_CATCH (except, RETURN_MASK_ALL)
827 ret = forward_target_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
828
829 record_btrace_allow_memory_access = old;
830
831 if (except.reason < 0)
832 throw_exception (except);
833
834 return ret;
835 }
836
837 /* The to_remove_breakpoint method of target record-btrace. */
838
839 static int
840 record_btrace_remove_breakpoint (struct target_ops *ops,
841 struct gdbarch *gdbarch,
842 struct bp_target_info *bp_tgt)
843 {
844 volatile struct gdb_exception except;
845 int old, ret;
846
847 /* Removing breakpoints requires accessing memory. Allow it for the
848 duration of this function. */
849 old = record_btrace_allow_memory_access;
850 record_btrace_allow_memory_access = 1;
851
852 ret = 0;
853 TRY_CATCH (except, RETURN_MASK_ALL)
854 ret = forward_target_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
855
856 record_btrace_allow_memory_access = old;
857
858 if (except.reason < 0)
859 throw_exception (except);
860
861 return ret;
862 }
863
864 /* The to_fetch_registers method of target record-btrace. */
865
866 static void
867 record_btrace_fetch_registers (struct target_ops *ops,
868 struct regcache *regcache, int regno)
869 {
870 struct btrace_insn_iterator *replay;
871 struct thread_info *tp;
872
873 tp = find_thread_ptid (inferior_ptid);
874 gdb_assert (tp != NULL);
875
876 replay = tp->btrace.replay;
877 if (replay != NULL)
878 {
879 const struct btrace_insn *insn;
880 struct gdbarch *gdbarch;
881 int pcreg;
882
883 gdbarch = get_regcache_arch (regcache);
884 pcreg = gdbarch_pc_regnum (gdbarch);
885 if (pcreg < 0)
886 return;
887
888 /* We can only provide the PC register. */
889 if (regno >= 0 && regno != pcreg)
890 return;
891
892 insn = btrace_insn_get (replay);
893 gdb_assert (insn != NULL);
894
895 regcache_raw_supply (regcache, regno, &insn->pc);
896 }
897 else
898 {
899 struct target_ops *t;
900
901 for (t = ops->beneath; t != NULL; t = t->beneath)
902 if (t->to_fetch_registers != NULL)
903 {
904 t->to_fetch_registers (t, regcache, regno);
905 break;
906 }
907 }
908 }
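/* Note on the replay case above (describing the code as written): only the PC
   can be supplied from the trace, so e.g. "print $pc" reflects the recorded
   instruction at the replay position, while requests for other individual
   registers are left unanswered here.  Outside of replay, the request is
   forwarded to the target beneath.  */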
909
910 /* The to_store_registers method of target record-btrace. */
911
912 static void
913 record_btrace_store_registers (struct target_ops *ops,
914 struct regcache *regcache, int regno)
915 {
916 struct target_ops *t;
917
918 if (record_btrace_is_replaying ())
919 error (_("This record target does not allow writing registers."));
920
921 gdb_assert (may_write_registers != 0);
922
923 for (t = ops->beneath; t != NULL; t = t->beneath)
924 if (t->to_store_registers != NULL)
925 {
926 t->to_store_registers (t, regcache, regno);
927 return;
928 }
929
930 noprocess ();
931 }
932
933 /* The to_prepare_to_store method of target record-btrace. */
934
935 static void
936 record_btrace_prepare_to_store (struct target_ops *ops,
937 struct regcache *regcache)
938 {
939 struct target_ops *t;
940
941 if (record_btrace_is_replaying ())
942 return;
943
944 for (t = ops->beneath; t != NULL; t = t->beneath)
945 if (t->to_prepare_to_store != NULL)
946 {
947 t->to_prepare_to_store (t, regcache);
948 return;
949 }
950 }
951
952 /* Implement stop_reason method for record_btrace_frame_unwind. */
953
954 static enum unwind_stop_reason
955 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
956 void **this_cache)
957 {
958 return UNWIND_UNAVAILABLE;
959 }
960
961 /* Implement this_id method for record_btrace_frame_unwind. */
962
963 static void
964 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
965 struct frame_id *this_id)
966 {
967 /* Leave THIS_ID at its default outer_frame_id value. */
968 }
969
970 /* Implement prev_register method for record_btrace_frame_unwind. */
971
972 static struct value *
973 record_btrace_frame_prev_register (struct frame_info *this_frame,
974 void **this_cache,
975 int regnum)
976 {
977 throw_error (NOT_AVAILABLE_ERROR,
978 _("Registers are not available in btrace record history"));
979 }
980
981 /* Implement sniffer method for record_btrace_frame_unwind. */
982
983 static int
984 record_btrace_frame_sniffer (const struct frame_unwind *self,
985 struct frame_info *this_frame,
986 void **this_cache)
987 {
988 struct thread_info *tp;
989 struct btrace_thread_info *btinfo;
990 struct btrace_insn_iterator *replay;
991
992 /* THIS_FRAME does not contain a reference to its thread. */
993 tp = find_thread_ptid (inferior_ptid);
994 gdb_assert (tp != NULL);
995
996 return btrace_is_replaying (tp);
997 }
998
999 /* btrace recording does not store previous memory content, nor the contents
1000 of the stack frames. Any unwinding would return erroneous results, since the
1001 stack contents no longer match the changed PC value restored from the history.
1002 Therefore this unwinder reports any possibly unwound registers as
1003 <unavailable>. */
1004
1005 static const struct frame_unwind record_btrace_frame_unwind =
1006 {
1007 NORMAL_FRAME,
1008 record_btrace_frame_unwind_stop_reason,
1009 record_btrace_frame_this_id,
1010 record_btrace_frame_prev_register,
1011 NULL,
1012 record_btrace_frame_sniffer
1013 };
1014
1015 /* The to_resume method of target record-btrace. */
1016
1017 static void
1018 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1019 enum gdb_signal signal)
1020 {
1021 /* As long as we're not replaying, just forward the request. */
1022 if (!record_btrace_is_replaying ())
1023 {
1024 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1025 if (ops->to_resume != NULL)
1026 return ops->to_resume (ops, ptid, step, signal);
1027
1028 error (_("Cannot find target for stepping."));
1029 }
1030
1031 error (_("You can't do this from here. Do 'record goto end', first."));
1032 }
1033
1034 /* The to_wait method of target record-btrace. */
1035
1036 static ptid_t
1037 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1038 struct target_waitstatus *status, int options)
1039 {
1040 /* As long as we're not replaying, just forward the request. */
1041 if (!record_btrace_is_replaying ())
1042 {
1043 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1044 if (ops->to_wait != NULL)
1045 return ops->to_wait (ops, ptid, status, options);
1046
1047 error (_("Cannot find target for waiting."));
1048 }
1049
1050 error (_("You can't do this from here. Do 'record goto end', first."));
1051 }
1052
1053 /* Initialize the record-btrace target ops. */
1054
1055 static void
1056 init_record_btrace_ops (void)
1057 {
1058 struct target_ops *ops;
1059
1060 ops = &record_btrace_ops;
1061 ops->to_shortname = "record-btrace";
1062 ops->to_longname = "Branch tracing target";
1063 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1064 ops->to_open = record_btrace_open;
1065 ops->to_close = record_btrace_close;
1066 ops->to_detach = record_detach;
1067 ops->to_disconnect = record_disconnect;
1068 ops->to_mourn_inferior = record_mourn_inferior;
1069 ops->to_kill = record_kill;
1070 ops->to_create_inferior = find_default_create_inferior;
1071 ops->to_stop_recording = record_btrace_stop_recording;
1072 ops->to_info_record = record_btrace_info;
1073 ops->to_insn_history = record_btrace_insn_history;
1074 ops->to_insn_history_from = record_btrace_insn_history_from;
1075 ops->to_insn_history_range = record_btrace_insn_history_range;
1076 ops->to_call_history = record_btrace_call_history;
1077 ops->to_call_history_from = record_btrace_call_history_from;
1078 ops->to_call_history_range = record_btrace_call_history_range;
1079 ops->to_record_is_replaying = record_btrace_is_replaying;
1080 ops->to_xfer_partial = record_btrace_xfer_partial;
1081 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1082 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1083 ops->to_fetch_registers = record_btrace_fetch_registers;
1084 ops->to_store_registers = record_btrace_store_registers;
1085 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1086 ops->to_get_unwinder = &record_btrace_frame_unwind;
1087 ops->to_resume = record_btrace_resume;
1088 ops->to_wait = record_btrace_wait;
1089 ops->to_stratum = record_stratum;
1090 ops->to_magic = OPS_MAGIC;
1091 }
1092
1093 /* Alias for "target record-btrace". */
1094
1095 static void
1096 cmd_record_btrace_start (char *args, int from_tty)
1097 {
1098 if (args != NULL && *args != 0)
1099 error (_("Invalid argument."));
1100
1101 execute_command ("target record-btrace", from_tty);
1102 }
1103
1104 void _initialize_record_btrace (void);
1105
1106 /* Initialize btrace commands. */
1107
1108 void
1109 _initialize_record_btrace (void)
1110 {
1111 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
1112 _("Start branch trace recording."),
1113 &record_cmdlist);
1114 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
1115
1116 init_record_btrace_ops ();
1117 add_target (&record_btrace_ops);
1118 }