1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40
41 /* The target_ops of record-btrace. */
42 static struct target_ops record_btrace_ops;
43
44 /* A new thread observer enabling branch tracing for the new thread. */
45 static struct observer *record_btrace_thread_observer;
46
47 /* Memory access types used in set/show record btrace replay-memory-access. */
48 static const char replay_memory_access_read_only[] = "read-only";
49 static const char replay_memory_access_read_write[] = "read-write";
50 static const char *const replay_memory_access_types[] =
51 {
52 replay_memory_access_read_only,
53 replay_memory_access_read_write,
54 NULL
55 };
56
57 /* The currently allowed replay memory access type. */
58 static const char *replay_memory_access = replay_memory_access_read_only;
59
60 /* Command lists for "set/show record btrace". */
61 static struct cmd_list_element *set_record_btrace_cmdlist;
62 static struct cmd_list_element *show_record_btrace_cmdlist;
63
64 /* The execution direction of the last resume we got. See record-full.c. */
65 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
66
67 /* The async event handler for reverse/replay execution. */
68 static struct async_event_handler *record_btrace_async_inferior_event_handler;
69
70 /* A flag indicating that we are currently generating a core file. */
71 static int record_btrace_generating_corefile;
72
73 /* Print a record-btrace debug message. Use do ... while (0) to avoid
74 ambiguities when used in if statements. */
75
76 #define DEBUG(msg, args...) \
77 do \
78 { \
79 if (record_debug != 0) \
80 fprintf_unfiltered (gdb_stdlog, \
81 "[record-btrace] " msg "\n", ##args); \
82 } \
83 while (0)
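
/* For example, DEBUG ("open") prints "[record-btrace] open" to gdb_stdlog
   when record debugging has been enabled with "set debug record 1".  */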
84
85
86 /* Update the branch trace for the current thread and return a pointer to its
87 thread_info.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92 static struct thread_info *
93 require_btrace_thread (void)
94 {
95 struct thread_info *tp;
96
97 DEBUG ("require");
98
99 tp = find_thread_ptid (inferior_ptid);
100 if (tp == NULL)
101 error (_("No thread."));
102
103 btrace_fetch (tp);
104
105 if (btrace_is_empty (tp))
106 error (_("No trace."));
107
108 return tp;
109 }
110
111 /* Update the branch trace for the current thread and return a pointer to its
112 branch trace information struct.
113
114 Throws an error if there is no thread or no trace. This function never
115 returns NULL. */
116
117 static struct btrace_thread_info *
118 require_btrace (void)
119 {
120 struct thread_info *tp;
121
122 tp = require_btrace_thread ();
123
124 return &tp->btrace;
125 }
126
127 /* Enable branch tracing for one thread. Warn on errors. */
128
129 static void
130 record_btrace_enable_warn (struct thread_info *tp)
131 {
132 volatile struct gdb_exception error;
133
134 TRY_CATCH (error, RETURN_MASK_ERROR)
135 btrace_enable (tp);
136
137 if (error.message != NULL)
138 warning ("%s", error.message);
139 }
140
141 /* Callback function to disable branch tracing for one thread. */
142
143 static void
144 record_btrace_disable_callback (void *arg)
145 {
146 struct thread_info *tp;
147
148 tp = arg;
149
150 btrace_disable (tp);
151 }
152
153 /* Enable automatic tracing of new threads. */
154
155 static void
156 record_btrace_auto_enable (void)
157 {
158 DEBUG ("attach thread observer");
159
160 record_btrace_thread_observer
161 = observer_attach_new_thread (record_btrace_enable_warn);
162 }
163
164 /* Disable automatic tracing of new threads. */
165
166 static void
167 record_btrace_auto_disable (void)
168 {
169 /* The observer may already have been detached. */
170 if (record_btrace_thread_observer == NULL)
171 return;
172
173 DEBUG ("detach thread observer");
174
175 observer_detach_new_thread (record_btrace_thread_observer);
176 record_btrace_thread_observer = NULL;
177 }
178
179 /* The record-btrace async event handler function. */
180
181 static void
182 record_btrace_handle_async_inferior_event (gdb_client_data data)
183 {
184 inferior_event_handler (INF_REG_EVENT, NULL);
185 }
186
187 /* The to_open method of target record-btrace. */
188
189 static void
190 record_btrace_open (const char *args, int from_tty)
191 {
192 struct cleanup *disable_chain;
193 struct thread_info *tp;
194
195 DEBUG ("open");
196
197 record_preopen ();
198
199 if (!target_has_execution)
200 error (_("The program is not being run."));
201
202 if (!target_supports_btrace ())
203 error (_("Target does not support branch tracing."));
204
205 if (non_stop)
206 error (_("Record btrace can't debug inferior in non-stop mode."));
207
208 gdb_assert (record_btrace_thread_observer == NULL);
209
210 disable_chain = make_cleanup (null_cleanup, NULL);
211 ALL_NON_EXITED_THREADS (tp)
212 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
213 {
214 btrace_enable (tp);
215
216 make_cleanup (record_btrace_disable_callback, tp);
217 }
218
219 record_btrace_auto_enable ();
220
221 push_target (&record_btrace_ops);
222
223 record_btrace_async_inferior_event_handler
224 = create_async_event_handler (record_btrace_handle_async_inferior_event,
225 NULL);
226 record_btrace_generating_corefile = 0;
227
228 observer_notify_record_changed (current_inferior (), 1);
229
230 discard_cleanups (disable_chain);
231 }
232
233 /* The to_stop_recording method of target record-btrace. */
234
235 static void
236 record_btrace_stop_recording (struct target_ops *self)
237 {
238 struct thread_info *tp;
239
240 DEBUG ("stop recording");
241
242 record_btrace_auto_disable ();
243
244 ALL_NON_EXITED_THREADS (tp)
245 if (tp->btrace.target != NULL)
246 btrace_disable (tp);
247 }
248
249 /* The to_close method of target record-btrace. */
250
251 static void
252 record_btrace_close (struct target_ops *self)
253 {
254 struct thread_info *tp;
255
256 if (record_btrace_async_inferior_event_handler != NULL)
257 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
258
259 /* Make sure automatic recording gets disabled even if we did not stop
260 recording before closing the record-btrace target. */
261 record_btrace_auto_disable ();
262
263 /* We should have already stopped recording.
264 Tear down btrace in case we have not. */
265 ALL_NON_EXITED_THREADS (tp)
266 btrace_teardown (tp);
267 }
268
269 /* The to_info_record method of target record-btrace. */
270
271 static void
272 record_btrace_info (struct target_ops *self)
273 {
274 struct btrace_thread_info *btinfo;
275 struct thread_info *tp;
276 unsigned int insns, calls;
277
278 DEBUG ("info");
279
280 tp = find_thread_ptid (inferior_ptid);
281 if (tp == NULL)
282 error (_("No thread."));
283
284 btrace_fetch (tp);
285
286 insns = 0;
287 calls = 0;
288
289 btinfo = &tp->btrace;
290
291 if (!btrace_is_empty (tp))
292 {
293 struct btrace_call_iterator call;
294 struct btrace_insn_iterator insn;
295
296 btrace_call_end (&call, btinfo);
297 btrace_call_prev (&call, 1);
298 calls = btrace_call_number (&call);
299
300 btrace_insn_end (&insn, btinfo);
301 btrace_insn_prev (&insn, 1);
302 insns = btrace_insn_number (&insn);
303 }
304
305 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
306 "%d (%s).\n"), insns, calls, tp->num,
307 target_pid_to_str (tp->ptid));
308
309 if (btrace_is_replaying (tp))
310 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
311 btrace_insn_number (btinfo->replay));
312 }
313
314 /* Print an unsigned int. */
315
316 static void
317 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
318 {
319 ui_out_field_fmt (uiout, fld, "%u", val);
320 }
321
322 /* Disassemble a section of the recorded instruction trace. */
323
324 static void
325 btrace_insn_history (struct ui_out *uiout,
326 const struct btrace_insn_iterator *begin,
327 const struct btrace_insn_iterator *end, int flags)
328 {
329 struct gdbarch *gdbarch;
330 struct btrace_insn_iterator it;
331
332 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
333 btrace_insn_number (end));
334
335 gdbarch = target_gdbarch ();
336
337 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
338 {
339 const struct btrace_insn *insn;
340
341 insn = btrace_insn_get (&it);
342
343 /* Print the instruction index. */
344 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
345 ui_out_text (uiout, "\t");
346
347 /* Disassembly with '/m' flag may not produce the expected result.
348 See PR gdb/11833. */
349 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
350 }
351 }
352
353 /* The to_insn_history method of target record-btrace. */
354
355 static void
356 record_btrace_insn_history (struct target_ops *self, int size, int flags)
357 {
358 struct btrace_thread_info *btinfo;
359 struct btrace_insn_history *history;
360 struct btrace_insn_iterator begin, end;
361 struct cleanup *uiout_cleanup;
362 struct ui_out *uiout;
363 unsigned int context, covered;
364
365 uiout = current_uiout;
366 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
367 "insn history");
368 context = abs (size);
369 if (context == 0)
370 error (_("Bad record instruction-history-size."));
371
372 btinfo = require_btrace ();
373 history = btinfo->insn_history;
374 if (history == NULL)
375 {
376 struct btrace_insn_iterator *replay;
377
378 DEBUG ("insn-history (0x%x): %d", flags, size);
379
380 /* If we're replaying, we start at the replay position. Otherwise, we
381 start at the tail of the trace. */
382 replay = btinfo->replay;
383 if (replay != NULL)
384 begin = *replay;
385 else
386 btrace_insn_end (&begin, btinfo);
387
388 /* We start from here and expand in the requested direction. Then we
389 expand in the other direction, as well, to fill up any remaining
390 context. */
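      /* For example, with SIZE == -10 we show the current instruction plus up
         to nine preceding instructions; if fewer instructions precede it, the
         remainder is taken from the instructions that follow.  */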
391 end = begin;
392 if (size < 0)
393 {
394 /* We want the current position covered, as well. */
395 covered = btrace_insn_next (&end, 1);
396 covered += btrace_insn_prev (&begin, context - covered);
397 covered += btrace_insn_next (&end, context - covered);
398 }
399 else
400 {
401 covered = btrace_insn_next (&end, context);
402 covered += btrace_insn_prev (&begin, context - covered);
403 }
404 }
405 else
406 {
407 begin = history->begin;
408 end = history->end;
409
410 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
411 btrace_insn_number (&begin), btrace_insn_number (&end));
412
413 if (size < 0)
414 {
415 end = begin;
416 covered = btrace_insn_prev (&begin, context);
417 }
418 else
419 {
420 begin = end;
421 covered = btrace_insn_next (&end, context);
422 }
423 }
424
425 if (covered > 0)
426 btrace_insn_history (uiout, &begin, &end, flags);
427 else
428 {
429 if (size < 0)
430 printf_unfiltered (_("At the start of the branch trace record.\n"));
431 else
432 printf_unfiltered (_("At the end of the branch trace record.\n"));
433 }
434
435 btrace_set_insn_history (btinfo, &begin, &end);
436 do_cleanups (uiout_cleanup);
437 }
438
439 /* The to_insn_history_range method of target record-btrace. */
440
441 static void
442 record_btrace_insn_history_range (struct target_ops *self,
443 ULONGEST from, ULONGEST to, int flags)
444 {
445 struct btrace_thread_info *btinfo;
446 struct btrace_insn_history *history;
447 struct btrace_insn_iterator begin, end;
448 struct cleanup *uiout_cleanup;
449 struct ui_out *uiout;
450 unsigned int low, high;
451 int found;
452
453 uiout = current_uiout;
454 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
455 "insn history");
456 low = from;
457 high = to;
458
459 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
460
461 /* Check for wrap-arounds. */
462 if (low != from || high != to)
463 error (_("Bad range."));
464
465 if (high < low)
466 error (_("Bad range."));
467
468 btinfo = require_btrace ();
469
470 found = btrace_find_insn_by_number (&begin, btinfo, low);
471 if (found == 0)
472 error (_("Range out of bounds."));
473
474 found = btrace_find_insn_by_number (&end, btinfo, high);
475 if (found == 0)
476 {
477 /* Silently truncate the range. */
478 btrace_insn_end (&end, btinfo);
479 }
480 else
481 {
482 /* We want both begin and end to be inclusive. */
483 btrace_insn_next (&end, 1);
484 }
485
486 btrace_insn_history (uiout, &begin, &end, flags);
487 btrace_set_insn_history (btinfo, &begin, &end);
488
489 do_cleanups (uiout_cleanup);
490 }
491
492 /* The to_insn_history_from method of target record-btrace. */
493
494 static void
495 record_btrace_insn_history_from (struct target_ops *self,
496 ULONGEST from, int size, int flags)
497 {
498 ULONGEST begin, end, context;
499
500 context = abs (size);
501 if (context == 0)
502 error (_("Bad record instruction-history-size."));
503
504 if (size < 0)
505 {
506 end = from;
507
508 if (from < context)
509 begin = 0;
510 else
511 begin = from - context + 1;
512 }
513 else
514 {
515 begin = from;
516 end = from + context - 1;
517
518 /* Check for wrap-around. */
519 if (end < begin)
520 end = ULONGEST_MAX;
521 }
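  /* For example, FROM == 100 with SIZE == 10 requests the range [100; 109],
     while SIZE == -10 requests the range [91; 100].  */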
522
523 record_btrace_insn_history_range (self, begin, end, flags);
524 }
525
526 /* Print the instruction number range for a function call history line. */
527
528 static void
529 btrace_call_history_insn_range (struct ui_out *uiout,
530 const struct btrace_function *bfun)
531 {
532 unsigned int begin, end, size;
533
534 size = VEC_length (btrace_insn_s, bfun->insn);
535 gdb_assert (size > 0);
536
537 begin = bfun->insn_offset;
538 end = begin + size - 1;
539
540 ui_out_field_uint (uiout, "insn begin", begin);
541 ui_out_text (uiout, ",");
542 ui_out_field_uint (uiout, "insn end", end);
543 }
544
545 /* Print the source line information for a function call history line. */
546
547 static void
548 btrace_call_history_src_line (struct ui_out *uiout,
549 const struct btrace_function *bfun)
550 {
551 struct symbol *sym;
552 int begin, end;
553
554 sym = bfun->sym;
555 if (sym == NULL)
556 return;
557
558 ui_out_field_string (uiout, "file",
559 symtab_to_filename_for_display (sym->symtab));
560
561 begin = bfun->lbegin;
562 end = bfun->lend;
563
564 if (end < begin)
565 return;
566
567 ui_out_text (uiout, ":");
568 ui_out_field_int (uiout, "min line", begin);
569
570 if (end == begin)
571 return;
572
573 ui_out_text (uiout, ",");
574 ui_out_field_int (uiout, "max line", end);
575 }
576
577 /* Get the name of a branch trace function. */
578
579 static const char *
580 btrace_get_bfun_name (const struct btrace_function *bfun)
581 {
582 struct minimal_symbol *msym;
583 struct symbol *sym;
584
585 if (bfun == NULL)
586 return "??";
587
588 msym = bfun->msym;
589 sym = bfun->sym;
590
591 if (sym != NULL)
592 return SYMBOL_PRINT_NAME (sym);
593 else if (msym != NULL)
594 return MSYMBOL_PRINT_NAME (msym);
595 else
596 return "??";
597 }
598
599 /* Print a section of the recorded function trace. */
600
601 static void
602 btrace_call_history (struct ui_out *uiout,
603 const struct btrace_thread_info *btinfo,
604 const struct btrace_call_iterator *begin,
605 const struct btrace_call_iterator *end,
606 enum record_print_flag flags)
607 {
608 struct btrace_call_iterator it;
609
610 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
611 btrace_call_number (end));
612
613 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
614 {
615 const struct btrace_function *bfun;
616 struct minimal_symbol *msym;
617 struct symbol *sym;
618
619 bfun = btrace_call_get (&it);
620 sym = bfun->sym;
621 msym = bfun->msym;
622
623 /* Print the function index. */
624 ui_out_field_uint (uiout, "index", bfun->number);
625 ui_out_text (uiout, "\t");
626
627 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
628 {
629 int level = bfun->level + btinfo->level, i;
630
631 for (i = 0; i < level; ++i)
632 ui_out_text (uiout, " ");
633 }
634
635 if (sym != NULL)
636 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
637 else if (msym != NULL)
638 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
639 else if (!ui_out_is_mi_like_p (uiout))
640 ui_out_field_string (uiout, "function", "??");
641
642 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
643 {
644 ui_out_text (uiout, _("\tinst "));
645 btrace_call_history_insn_range (uiout, bfun);
646 }
647
648 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
649 {
650 ui_out_text (uiout, _("\tat "));
651 btrace_call_history_src_line (uiout, bfun);
652 }
653
654 ui_out_text (uiout, "\n");
655 }
656 }
657
658 /* The to_call_history method of target record-btrace. */
659
660 static void
661 record_btrace_call_history (struct target_ops *self, int size, int flags)
662 {
663 struct btrace_thread_info *btinfo;
664 struct btrace_call_history *history;
665 struct btrace_call_iterator begin, end;
666 struct cleanup *uiout_cleanup;
667 struct ui_out *uiout;
668 unsigned int context, covered;
669
670 uiout = current_uiout;
671 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
672 "insn history");
673 context = abs (size);
674 if (context == 0)
675 error (_("Bad record function-call-history-size."));
676
677 btinfo = require_btrace ();
678 history = btinfo->call_history;
679 if (history == NULL)
680 {
681 struct btrace_insn_iterator *replay;
682
683 DEBUG ("call-history (0x%x): %d", flags, size);
684
685 /* If we're replaying, we start at the replay position. Otherwise, we
686 start at the tail of the trace. */
687 replay = btinfo->replay;
688 if (replay != NULL)
689 {
690 begin.function = replay->function;
691 begin.btinfo = btinfo;
692 }
693 else
694 btrace_call_end (&begin, btinfo);
695
696 /* We start from here and expand in the requested direction. Then we
697 expand in the other direction, as well, to fill up any remaining
698 context. */
699 end = begin;
700 if (size < 0)
701 {
702 /* We want the current position covered, as well. */
703 covered = btrace_call_next (&end, 1);
704 covered += btrace_call_prev (&begin, context - covered);
705 covered += btrace_call_next (&end, context - covered);
706 }
707 else
708 {
709 covered = btrace_call_next (&end, context);
710 covered += btrace_call_prev (&begin, context - covered);
711 }
712 }
713 else
714 {
715 begin = history->begin;
716 end = history->end;
717
718 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
719 btrace_call_number (&begin), btrace_call_number (&end));
720
721 if (size < 0)
722 {
723 end = begin;
724 covered = btrace_call_prev (&begin, context);
725 }
726 else
727 {
728 begin = end;
729 covered = btrace_call_next (&end, context);
730 }
731 }
732
733 if (covered > 0)
734 btrace_call_history (uiout, btinfo, &begin, &end, flags);
735 else
736 {
737 if (size < 0)
738 printf_unfiltered (_("At the start of the branch trace record.\n"));
739 else
740 printf_unfiltered (_("At the end of the branch trace record.\n"));
741 }
742
743 btrace_set_call_history (btinfo, &begin, &end);
744 do_cleanups (uiout_cleanup);
745 }
746
747 /* The to_call_history_range method of target record-btrace. */
748
749 static void
750 record_btrace_call_history_range (struct target_ops *self,
751 ULONGEST from, ULONGEST to, int flags)
752 {
753 struct btrace_thread_info *btinfo;
754 struct btrace_call_history *history;
755 struct btrace_call_iterator begin, end;
756 struct cleanup *uiout_cleanup;
757 struct ui_out *uiout;
758 unsigned int low, high;
759 int found;
760
761 uiout = current_uiout;
762 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
763 "func history");
764 low = from;
765 high = to;
766
767 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
768
769 /* Check for wrap-arounds. */
770 if (low != from || high != to)
771 error (_("Bad range."));
772
773 if (high < low)
774 error (_("Bad range."));
775
776 btinfo = require_btrace ();
777
778 found = btrace_find_call_by_number (&begin, btinfo, low);
779 if (found == 0)
780 error (_("Range out of bounds."));
781
782 found = btrace_find_call_by_number (&end, btinfo, high);
783 if (found == 0)
784 {
785 /* Silently truncate the range. */
786 btrace_call_end (&end, btinfo);
787 }
788 else
789 {
790 /* We want both begin and end to be inclusive. */
791 btrace_call_next (&end, 1);
792 }
793
794 btrace_call_history (uiout, btinfo, &begin, &end, flags);
795 btrace_set_call_history (btinfo, &begin, &end);
796
797 do_cleanups (uiout_cleanup);
798 }
799
800 /* The to_call_history_from method of target record-btrace. */
801
802 static void
803 record_btrace_call_history_from (struct target_ops *self,
804 ULONGEST from, int size, int flags)
805 {
806 ULONGEST begin, end, context;
807
808 context = abs (size);
809 if (context == 0)
810 error (_("Bad record function-call-history-size."));
811
812 if (size < 0)
813 {
814 end = from;
815
816 if (from < context)
817 begin = 0;
818 else
819 begin = from - context + 1;
820 }
821 else
822 {
823 begin = from;
824 end = from + context - 1;
825
826 /* Check for wrap-around. */
827 if (end < begin)
828 end = ULONGEST_MAX;
829 }
830
831 record_btrace_call_history_range (self, begin, end, flags);
832 }
833
834 /* The to_record_is_replaying method of target record-btrace. */
835
836 static int
837 record_btrace_is_replaying (struct target_ops *self)
838 {
839 struct thread_info *tp;
840
841 ALL_NON_EXITED_THREADS (tp)
842 if (btrace_is_replaying (tp))
843 return 1;
844
845 return 0;
846 }
847
848 /* The to_xfer_partial method of target record-btrace. */
849
850 static enum target_xfer_status
851 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
852 const char *annex, gdb_byte *readbuf,
853 const gdb_byte *writebuf, ULONGEST offset,
854 ULONGEST len, ULONGEST *xfered_len)
855 {
858 /* Filter out requests that don't make sense during replay. */
859 if (replay_memory_access == replay_memory_access_read_only
860 && !record_btrace_generating_corefile
861 && record_btrace_is_replaying (ops))
862 {
863 switch (object)
864 {
865 case TARGET_OBJECT_MEMORY:
866 {
867 struct target_section *section;
868
869 /* We do not allow writing memory in general. */
870 if (writebuf != NULL)
871 {
872 *xfered_len = len;
873 return TARGET_XFER_UNAVAILABLE;
874 }
875
876 /* We allow reading readonly memory. */
877 section = target_section_by_addr (ops, offset);
878 if (section != NULL)
879 {
880 /* Check if the section we found is readonly. */
881 if ((bfd_get_section_flags (section->the_bfd_section->owner,
882 section->the_bfd_section)
883 & SEC_READONLY) != 0)
884 {
885 /* Truncate the request to fit into this section. */
886 len = min (len, section->endaddr - offset);
887 break;
888 }
889 }
890
891 *xfered_len = len;
892 return TARGET_XFER_UNAVAILABLE;
893 }
894 }
895 }
896
897 /* Forward the request. */
898 ops = ops->beneath;
899 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
900 offset, len, xfered_len);
901 }
902
903 /* The to_insert_breakpoint method of target record-btrace. */
904
905 static int
906 record_btrace_insert_breakpoint (struct target_ops *ops,
907 struct gdbarch *gdbarch,
908 struct bp_target_info *bp_tgt)
909 {
910 volatile struct gdb_exception except;
911 const char *old;
912 int ret;
913
914 /* Inserting breakpoints requires accessing memory. Allow it for the
915 duration of this function. */
916 old = replay_memory_access;
917 replay_memory_access = replay_memory_access_read_write;
918
919 ret = 0;
920 TRY_CATCH (except, RETURN_MASK_ALL)
921 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
922
923 replay_memory_access = old;
924
925 if (except.reason < 0)
926 throw_exception (except);
927
928 return ret;
929 }
930
931 /* The to_remove_breakpoint method of target record-btrace. */
932
933 static int
934 record_btrace_remove_breakpoint (struct target_ops *ops,
935 struct gdbarch *gdbarch,
936 struct bp_target_info *bp_tgt)
937 {
938 volatile struct gdb_exception except;
939 const char *old;
940 int ret;
941
942 /* Removing breakpoints requires accessing memory. Allow it for the
943 duration of this function. */
944 old = replay_memory_access;
945 replay_memory_access = replay_memory_access_read_write;
946
947 ret = 0;
948 TRY_CATCH (except, RETURN_MASK_ALL)
949 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
950
951 replay_memory_access = old;
952
953 if (except.reason < 0)
954 throw_exception (except);
955
956 return ret;
957 }
958
959 /* The to_fetch_registers method of target record-btrace. */
960
961 static void
962 record_btrace_fetch_registers (struct target_ops *ops,
963 struct regcache *regcache, int regno)
964 {
965 struct btrace_insn_iterator *replay;
966 struct thread_info *tp;
967
968 tp = find_thread_ptid (inferior_ptid);
969 gdb_assert (tp != NULL);
970
971 replay = tp->btrace.replay;
972 if (replay != NULL && !record_btrace_generating_corefile)
973 {
974 const struct btrace_insn *insn;
975 struct gdbarch *gdbarch;
976 int pcreg;
977
978 gdbarch = get_regcache_arch (regcache);
979 pcreg = gdbarch_pc_regnum (gdbarch);
980 if (pcreg < 0)
981 return;
982
983 /* We can only provide the PC register. */
984 if (regno >= 0 && regno != pcreg)
985 return;
986
987 insn = btrace_insn_get (replay);
988 gdb_assert (insn != NULL);
989
990 regcache_raw_supply (regcache, pcreg, &insn->pc);
991 }
992 else
993 {
994 struct target_ops *t = ops->beneath;
995
996 t->to_fetch_registers (t, regcache, regno);
997 }
998 }
999
1000 /* The to_store_registers method of target record-btrace. */
1001
1002 static void
1003 record_btrace_store_registers (struct target_ops *ops,
1004 struct regcache *regcache, int regno)
1005 {
1006 struct target_ops *t;
1007
1008 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1009 error (_("This record target does not allow writing registers."));
1010
1011 gdb_assert (may_write_registers != 0);
1012
1013 t = ops->beneath;
1014 t->to_store_registers (t, regcache, regno);
1015 }
1016
1017 /* The to_prepare_to_store method of target record-btrace. */
1018
1019 static void
1020 record_btrace_prepare_to_store (struct target_ops *ops,
1021 struct regcache *regcache)
1022 {
1023 struct target_ops *t;
1024
1025 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1026 return;
1027
1028 t = ops->beneath;
1029 t->to_prepare_to_store (t, regcache);
1030 }
1031
1032 /* The branch trace frame cache. */
1033
1034 struct btrace_frame_cache
1035 {
1036 /* The thread. */
1037 struct thread_info *tp;
1038
1039 /* The frame info. */
1040 struct frame_info *frame;
1041
1042 /* The branch trace function segment. */
1043 const struct btrace_function *bfun;
1044 };
1045
1046 /* A struct btrace_frame_cache hash table indexed by FRAME. */
1047
1048 static htab_t bfcache;
1049
1050 /* hash_f for htab_create_alloc of bfcache. */
1051
1052 static hashval_t
1053 bfcache_hash (const void *arg)
1054 {
1055 const struct btrace_frame_cache *cache = arg;
1056
1057 return htab_hash_pointer (cache->frame);
1058 }
1059
1060 /* eq_f for htab_create_alloc of bfcache. */
1061
1062 static int
1063 bfcache_eq (const void *arg1, const void *arg2)
1064 {
1065 const struct btrace_frame_cache *cache1 = arg1;
1066 const struct btrace_frame_cache *cache2 = arg2;
1067
1068 return cache1->frame == cache2->frame;
1069 }
1070
1071 /* Create a new btrace frame cache. */
1072
1073 static struct btrace_frame_cache *
1074 bfcache_new (struct frame_info *frame)
1075 {
1076 struct btrace_frame_cache *cache;
1077 void **slot;
1078
1079 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1080 cache->frame = frame;
1081
1082 slot = htab_find_slot (bfcache, cache, INSERT);
1083 gdb_assert (*slot == NULL);
1084 *slot = cache;
1085
1086 return cache;
1087 }
1088
1089 /* Extract the branch trace function from a branch trace frame. */
1090
1091 static const struct btrace_function *
1092 btrace_get_frame_function (struct frame_info *frame)
1093 {
1094 const struct btrace_frame_cache *cache;
1095 const struct btrace_function *bfun;
1096 struct btrace_frame_cache pattern;
1097 void **slot;
1098
1099 pattern.frame = frame;
1100
1101 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1102 if (slot == NULL)
1103 return NULL;
1104
1105 cache = *slot;
1106 return cache->bfun;
1107 }
1108
1109 /* Implement stop_reason method for record_btrace_frame_unwind. */
1110
1111 static enum unwind_stop_reason
1112 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1113 void **this_cache)
1114 {
1115 const struct btrace_frame_cache *cache;
1116 const struct btrace_function *bfun;
1117
1118 cache = *this_cache;
1119 bfun = cache->bfun;
1120 gdb_assert (bfun != NULL);
1121
1122 if (bfun->up == NULL)
1123 return UNWIND_UNAVAILABLE;
1124
1125 return UNWIND_NO_REASON;
1126 }
1127
1128 /* Implement this_id method for record_btrace_frame_unwind. */
1129
1130 static void
1131 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1132 struct frame_id *this_id)
1133 {
1134 const struct btrace_frame_cache *cache;
1135 const struct btrace_function *bfun;
1136 CORE_ADDR code, special;
1137
1138 cache = *this_cache;
1139
1140 bfun = cache->bfun;
1141 gdb_assert (bfun != NULL);
1142
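  /* Walk to the first segment of this function chain, so that every segment
     of the same function call yields the same frame id.  */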
1143 while (bfun->segment.prev != NULL)
1144 bfun = bfun->segment.prev;
1145
1146 code = get_frame_func (this_frame);
1147 special = bfun->number;
1148
1149 *this_id = frame_id_build_unavailable_stack_special (code, special);
1150
1151 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1152 btrace_get_bfun_name (cache->bfun),
1153 core_addr_to_string_nz (this_id->code_addr),
1154 core_addr_to_string_nz (this_id->special_addr));
1155 }
1156
1157 /* Implement prev_register method for record_btrace_frame_unwind. */
1158
1159 static struct value *
1160 record_btrace_frame_prev_register (struct frame_info *this_frame,
1161 void **this_cache,
1162 int regnum)
1163 {
1164 const struct btrace_frame_cache *cache;
1165 const struct btrace_function *bfun, *caller;
1166 const struct btrace_insn *insn;
1167 struct gdbarch *gdbarch;
1168 CORE_ADDR pc;
1169 int pcreg;
1170
1171 gdbarch = get_frame_arch (this_frame);
1172 pcreg = gdbarch_pc_regnum (gdbarch);
1173 if (pcreg < 0 || regnum != pcreg)
1174 throw_error (NOT_AVAILABLE_ERROR,
1175 _("Registers are not available in btrace record history"));
1176
1177 cache = *this_cache;
1178 bfun = cache->bfun;
1179 gdb_assert (bfun != NULL);
1180
1181 caller = bfun->up;
1182 if (caller == NULL)
1183 throw_error (NOT_AVAILABLE_ERROR,
1184 _("No caller in btrace record history"));
1185
1186 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1187 {
1188 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1189 pc = insn->pc;
1190 }
1191 else
1192 {
1193 insn = VEC_last (btrace_insn_s, caller->insn);
1194 pc = insn->pc;
1195
1196 pc += gdb_insn_length (gdbarch, pc);
1197 }
1198
1199 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1200 btrace_get_bfun_name (bfun), bfun->level,
1201 core_addr_to_string_nz (pc));
1202
1203 return frame_unwind_got_address (this_frame, regnum, pc);
1204 }
1205
1206 /* Implement sniffer method for record_btrace_frame_unwind. */
1207
1208 static int
1209 record_btrace_frame_sniffer (const struct frame_unwind *self,
1210 struct frame_info *this_frame,
1211 void **this_cache)
1212 {
1213 const struct btrace_function *bfun;
1214 struct btrace_frame_cache *cache;
1215 struct thread_info *tp;
1216 struct frame_info *next;
1217
1218 /* THIS_FRAME does not contain a reference to its thread. */
1219 tp = find_thread_ptid (inferior_ptid);
1220 gdb_assert (tp != NULL);
1221
1222 bfun = NULL;
1223 next = get_next_frame (this_frame);
1224 if (next == NULL)
1225 {
1226 const struct btrace_insn_iterator *replay;
1227
1228 replay = tp->btrace.replay;
1229 if (replay != NULL)
1230 bfun = replay->function;
1231 }
1232 else
1233 {
1234 const struct btrace_function *callee;
1235
1236 callee = btrace_get_frame_function (next);
1237 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1238 bfun = callee->up;
1239 }
1240
1241 if (bfun == NULL)
1242 return 0;
1243
1244 DEBUG ("[frame] sniffed frame for %s on level %d",
1245 btrace_get_bfun_name (bfun), bfun->level);
1246
1247 /* This is our frame. Initialize the frame cache. */
1248 cache = bfcache_new (this_frame);
1249 cache->tp = tp;
1250 cache->bfun = bfun;
1251
1252 *this_cache = cache;
1253 return 1;
1254 }
1255
1256 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1257
1258 static int
1259 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1260 struct frame_info *this_frame,
1261 void **this_cache)
1262 {
1263 const struct btrace_function *bfun, *callee;
1264 struct btrace_frame_cache *cache;
1265 struct frame_info *next;
1266
1267 next = get_next_frame (this_frame);
1268 if (next == NULL)
1269 return 0;
1270
1271 callee = btrace_get_frame_function (next);
1272 if (callee == NULL)
1273 return 0;
1274
1275 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1276 return 0;
1277
1278 bfun = callee->up;
1279 if (bfun == NULL)
1280 return 0;
1281
1282 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1283 btrace_get_bfun_name (bfun), bfun->level);
1284
1285 /* This is our frame. Initialize the frame cache. */
1286 cache = bfcache_new (this_frame);
1287 cache->tp = find_thread_ptid (inferior_ptid);
1288 cache->bfun = bfun;
1289
1290 *this_cache = cache;
1291 return 1;
1292 }
1293
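/* Implement the dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind.  */
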
1294 static void
1295 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1296 {
1297 struct btrace_frame_cache *cache;
1298 void **slot;
1299
1300 cache = this_cache;
1301
1302 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1303 gdb_assert (slot != NULL);
1304
1305 htab_remove_elt (bfcache, cache);
1306 }
1307
1308 /* btrace recording does not store previous memory content, nor the contents
1309 of the stack frames. Any unwinding would return erroneous results, as the
1310 stack contents no longer match the changed PC value restored from history.
1311 Therefore this unwinder reports any possibly unwound registers as
1312 <unavailable>. */
1313
1314 const struct frame_unwind record_btrace_frame_unwind =
1315 {
1316 NORMAL_FRAME,
1317 record_btrace_frame_unwind_stop_reason,
1318 record_btrace_frame_this_id,
1319 record_btrace_frame_prev_register,
1320 NULL,
1321 record_btrace_frame_sniffer,
1322 record_btrace_frame_dealloc_cache
1323 };
1324
1325 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1326 {
1327 TAILCALL_FRAME,
1328 record_btrace_frame_unwind_stop_reason,
1329 record_btrace_frame_this_id,
1330 record_btrace_frame_prev_register,
1331 NULL,
1332 record_btrace_tailcall_frame_sniffer,
1333 record_btrace_frame_dealloc_cache
1334 };
1335
1336 /* Implement the to_get_unwinder method. */
1337
1338 static const struct frame_unwind *
1339 record_btrace_to_get_unwinder (struct target_ops *self)
1340 {
1341 return &record_btrace_frame_unwind;
1342 }
1343
1344 /* Implement the to_get_tailcall_unwinder method. */
1345
1346 static const struct frame_unwind *
1347 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1348 {
1349 return &record_btrace_tailcall_frame_unwind;
1350 }
1351
1352 /* Indicate that TP should be resumed according to FLAG. */
1353
1354 static void
1355 record_btrace_resume_thread (struct thread_info *tp,
1356 enum btrace_thread_flag flag)
1357 {
1358 struct btrace_thread_info *btinfo;
1359
1360 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1361
1362 btinfo = &tp->btrace;
1363
1364 if ((btinfo->flags & BTHR_MOVE) != 0)
1365 error (_("Thread already moving."));
1366
1367 /* Fetch the latest branch trace. */
1368 btrace_fetch (tp);
1369
1370 btinfo->flags |= flag;
1371 }
1372
1373 /* Find the thread to resume given a PTID. */
1374
1375 static struct thread_info *
1376 record_btrace_find_resume_thread (ptid_t ptid)
1377 {
1378 struct thread_info *tp;
1379
1380 /* When asked to resume everything, we pick the current thread. */
1381 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1382 ptid = inferior_ptid;
1383
1384 return find_thread_ptid (ptid);
1385 }
1386
1387 /* Start replaying a thread. */
1388
1389 static struct btrace_insn_iterator *
1390 record_btrace_start_replaying (struct thread_info *tp)
1391 {
1392 volatile struct gdb_exception except;
1393 struct btrace_insn_iterator *replay;
1394 struct btrace_thread_info *btinfo;
1395 int executing;
1396
1397 btinfo = &tp->btrace;
1398 replay = NULL;
1399
1400 /* We can't start replaying without trace. */
1401 if (btinfo->begin == NULL)
1402 return NULL;
1403
1404 /* Clear the executing flag to allow changes to the current frame.
1405 We are not actually running yet. We just started a reverse execution
1406 command or a record goto command.
1407 For the latter, EXECUTING is false and this has no effect.
1408 For the former, EXECUTING is true and we're in to_wait, about to
1409 move the thread. Since we need to recompute the stack, we temporarily
1410 set EXECUTING to false. */
1411 executing = is_executing (tp->ptid);
1412 set_executing (tp->ptid, 0);
1413
1414 /* GDB stores the current frame_id when stepping in order to detect steps
1415 into subroutines.
1416 Since frames are computed differently when we're replaying, we need to
1417 recompute those stored frames and fix them up so we can still detect
1418 subroutines after we started replaying. */
1419 TRY_CATCH (except, RETURN_MASK_ALL)
1420 {
1421 struct frame_info *frame;
1422 struct frame_id frame_id;
1423 int upd_step_frame_id, upd_step_stack_frame_id;
1424
1425 /* The current frame without replaying - computed via normal unwind. */
1426 frame = get_current_frame ();
1427 frame_id = get_frame_id (frame);
1428
1429 /* Check if we need to update any stepping-related frame id's. */
1430 upd_step_frame_id = frame_id_eq (frame_id,
1431 tp->control.step_frame_id);
1432 upd_step_stack_frame_id = frame_id_eq (frame_id,
1433 tp->control.step_stack_frame_id);
1434
1435 /* We start replaying at the end of the branch trace. This corresponds
1436 to the current instruction. */
1437 replay = xmalloc (sizeof (*replay));
1438 btrace_insn_end (replay, btinfo);
1439
1440 /* We're not replaying, yet. */
1441 gdb_assert (btinfo->replay == NULL);
1442 btinfo->replay = replay;
1443
1444 /* Make sure we're not using any stale registers. */
1445 registers_changed_ptid (tp->ptid);
1446
1447 /* The current frame with replaying - computed via btrace unwind. */
1448 frame = get_current_frame ();
1449 frame_id = get_frame_id (frame);
1450
1451 /* Replace stepping related frames where necessary. */
1452 if (upd_step_frame_id)
1453 tp->control.step_frame_id = frame_id;
1454 if (upd_step_stack_frame_id)
1455 tp->control.step_stack_frame_id = frame_id;
1456 }
1457
1458 /* Restore the previous execution state. */
1459 set_executing (tp->ptid, executing);
1460
1461 if (except.reason < 0)
1462 {
1463 xfree (btinfo->replay);
1464 btinfo->replay = NULL;
1465
1466 registers_changed_ptid (tp->ptid);
1467
1468 throw_exception (except);
1469 }
1470
1471 return replay;
1472 }
1473
1474 /* Stop replaying a thread. */
1475
1476 static void
1477 record_btrace_stop_replaying (struct thread_info *tp)
1478 {
1479 struct btrace_thread_info *btinfo;
1480
1481 btinfo = &tp->btrace;
1482
1483 xfree (btinfo->replay);
1484 btinfo->replay = NULL;
1485
1486 /* Make sure we're not leaving any stale registers. */
1487 registers_changed_ptid (tp->ptid);
1488 }
1489
1490 /* The to_resume method of target record-btrace. */
1491
1492 static void
1493 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1494 enum gdb_signal signal)
1495 {
1496 struct thread_info *tp, *other;
1497 enum btrace_thread_flag flag;
1498
1499 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1500
1501 /* Store the execution direction of the last resume. */
1502 record_btrace_resume_exec_dir = execution_direction;
1503
1504 tp = record_btrace_find_resume_thread (ptid);
1505 if (tp == NULL)
1506 error (_("Cannot find thread to resume."));
1507
1508 /* Stop replaying other threads if the thread to resume is not replaying. */
1509 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1510 ALL_NON_EXITED_THREADS (other)
1511 record_btrace_stop_replaying (other);
1512
1513 /* As long as we're not replaying, just forward the request. */
1514 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1515 {
1516 ops = ops->beneath;
1517 return ops->to_resume (ops, ptid, step, signal);
1518 }
1519
1520 /* Compute the btrace thread flag for the requested move. */
1521 if (step == 0)
1522 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1523 else
1524 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1525
1526 /* At the moment, we only move a single thread. We could also move
1527 all threads in parallel by single-stepping each resumed thread
1528 until the first runs into an event.
1529 When we do that, we would want to continue all other threads.
1530 For now, just resume one thread so as not to confuse to_wait. */
1531 record_btrace_resume_thread (tp, flag);
1532
1533 /* We just indicate the resume intent here. The actual stepping happens in
1534 record_btrace_wait below. */
1535
1536 /* Async support. */
1537 if (target_can_async_p ())
1538 {
1539 target_async (inferior_event_handler, 0);
1540 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1541 }
1542 }
1543
1544 /* Find a thread to move. */
1545
1546 static struct thread_info *
1547 record_btrace_find_thread_to_move (ptid_t ptid)
1548 {
1549 struct thread_info *tp;
1550
1551 /* First check the parameter thread. */
1552 tp = find_thread_ptid (ptid);
1553 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1554 return tp;
1555
1556 /* Otherwise, find one other thread that has been resumed. */
1557 ALL_NON_EXITED_THREADS (tp)
1558 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1559 return tp;
1560
1561 return NULL;
1562 }
1563
1564 /* Return a target_waitstatus indicating that we ran out of history. */
1565
1566 static struct target_waitstatus
1567 btrace_step_no_history (void)
1568 {
1569 struct target_waitstatus status;
1570
1571 status.kind = TARGET_WAITKIND_NO_HISTORY;
1572
1573 return status;
1574 }
1575
1576 /* Return a target_waitstatus indicating that a step finished. */
1577
1578 static struct target_waitstatus
1579 btrace_step_stopped (void)
1580 {
1581 struct target_waitstatus status;
1582
1583 status.kind = TARGET_WAITKIND_STOPPED;
1584 status.value.sig = GDB_SIGNAL_TRAP;
1585
1586 return status;
1587 }
1588
1589 /* Clear the record histories. */
1590
1591 static void
1592 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1593 {
1594 xfree (btinfo->insn_history);
1595 xfree (btinfo->call_history);
1596
1597 btinfo->insn_history = NULL;
1598 btinfo->call_history = NULL;
1599 }
1600
1601 /* Step a single thread. */
1602
1603 static struct target_waitstatus
1604 record_btrace_step_thread (struct thread_info *tp)
1605 {
1606 struct btrace_insn_iterator *replay, end;
1607 struct btrace_thread_info *btinfo;
1608 struct address_space *aspace;
1609 struct inferior *inf;
1610 enum btrace_thread_flag flags;
1611 unsigned int steps;
1612
1613 /* We can't step without an execution history. */
1614 if (btrace_is_empty (tp))
1615 return btrace_step_no_history ();
1616
1617 btinfo = &tp->btrace;
1618 replay = btinfo->replay;
1619
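  /* BTHR_MOVE is the union of the movement flags BTHR_STEP, BTHR_RSTEP,
     BTHR_CONT and BTHR_RCONT (see btrace.h).  */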
1620 flags = btinfo->flags & BTHR_MOVE;
1621 btinfo->flags &= ~BTHR_MOVE;
1622
1623 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1624
1625 switch (flags)
1626 {
1627 default:
1628 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1629
1630 case BTHR_STEP:
1631 /* We're done if we're not replaying. */
1632 if (replay == NULL)
1633 return btrace_step_no_history ();
1634
1635 /* We are always able to step at least once. */
1636 steps = btrace_insn_next (replay, 1);
1637 gdb_assert (steps == 1);
1638
1639 /* Determine the end of the instruction trace. */
1640 btrace_insn_end (&end, btinfo);
1641
1642 /* We stop replaying if we reached the end of the trace. */
1643 if (btrace_insn_cmp (replay, &end) == 0)
1644 record_btrace_stop_replaying (tp);
1645
1646 return btrace_step_stopped ();
1647
1648 case BTHR_RSTEP:
1649 /* Start replaying if we're not already doing so. */
1650 if (replay == NULL)
1651 replay = record_btrace_start_replaying (tp);
1652
1653 /* If we can't step any further, we reached the end of the history. */
1654 steps = btrace_insn_prev (replay, 1);
1655 if (steps == 0)
1656 return btrace_step_no_history ();
1657
1658 return btrace_step_stopped ();
1659
1660 case BTHR_CONT:
1661 /* We're done if we're not replaying. */
1662 if (replay == NULL)
1663 return btrace_step_no_history ();
1664
1665 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1666 aspace = inf->aspace;
1667
1668 /* Determine the end of the instruction trace. */
1669 btrace_insn_end (&end, btinfo);
1670
1671 for (;;)
1672 {
1673 const struct btrace_insn *insn;
1674
1675 /* We are always able to step at least once. */
1676 steps = btrace_insn_next (replay, 1);
1677 gdb_assert (steps == 1);
1678
1679 /* We stop replaying if we reached the end of the trace. */
1680 if (btrace_insn_cmp (replay, &end) == 0)
1681 {
1682 record_btrace_stop_replaying (tp);
1683 return btrace_step_no_history ();
1684 }
1685
1686 insn = btrace_insn_get (replay);
1687 gdb_assert (insn);
1688
1689 DEBUG ("stepping %d (%s) ... %s", tp->num,
1690 target_pid_to_str (tp->ptid),
1691 core_addr_to_string_nz (insn->pc));
1692
1693 if (breakpoint_here_p (aspace, insn->pc))
1694 return btrace_step_stopped ();
1695 }
1696
1697 case BTHR_RCONT:
1698 /* Start replaying if we're not already doing so. */
1699 if (replay == NULL)
1700 replay = record_btrace_start_replaying (tp);
1701
1702 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1703 aspace = inf->aspace;
1704
1705 for (;;)
1706 {
1707 const struct btrace_insn *insn;
1708
1709 /* If we can't step any further, we're done. */
1710 steps = btrace_insn_prev (replay, 1);
1711 if (steps == 0)
1712 return btrace_step_no_history ();
1713
1714 insn = btrace_insn_get (replay);
1715 gdb_assert (insn);
1716
1717 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1718 target_pid_to_str (tp->ptid),
1719 core_addr_to_string_nz (insn->pc));
1720
1721 if (breakpoint_here_p (aspace, insn->pc))
1722 return btrace_step_stopped ();
1723 }
1724 }
1725 }
1726
1727 /* The to_wait method of target record-btrace. */
1728
1729 static ptid_t
1730 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1731 struct target_waitstatus *status, int options)
1732 {
1733 struct thread_info *tp, *other;
1734
1735 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1736
1737 /* As long as we're not replaying, just forward the request. */
1738 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1739 {
1740 ops = ops->beneath;
1741 return ops->to_wait (ops, ptid, status, options);
1742 }
1743
1744 /* Let's find a thread to move. */
1745 tp = record_btrace_find_thread_to_move (ptid);
1746 if (tp == NULL)
1747 {
1748 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1749
1750 status->kind = TARGET_WAITKIND_IGNORE;
1751 return minus_one_ptid;
1752 }
1753
1754 /* We only move a single thread. We're not able to correlate threads. */
1755 *status = record_btrace_step_thread (tp);
1756
1757 /* Stop all other threads. */
1758 if (!non_stop)
1759 ALL_NON_EXITED_THREADS (other)
1760 other->btrace.flags &= ~BTHR_MOVE;
1761
1762 /* Start record histories anew from the current position. */
1763 record_btrace_clear_histories (&tp->btrace);
1764
1765 /* We moved the replay position but did not update registers. */
1766 registers_changed_ptid (tp->ptid);
1767
1768 return tp->ptid;
1769 }
1770
1771 /* The to_can_execute_reverse method of target record-btrace. */
1772
1773 static int
1774 record_btrace_can_execute_reverse (struct target_ops *self)
1775 {
1776 return 1;
1777 }
1778
1779 /* The to_decr_pc_after_break method of target record-btrace. */
1780
1781 static CORE_ADDR
1782 record_btrace_decr_pc_after_break (struct target_ops *ops,
1783 struct gdbarch *gdbarch)
1784 {
1785 /* When replaying, we do not actually execute the breakpoint instruction
1786 so there is no need to adjust the PC after hitting a breakpoint. */
1787 if (record_btrace_is_replaying (ops))
1788 return 0;
1789
1790 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
1791 }
1792
1793 /* The to_update_thread_list method of target record-btrace. */
1794
1795 static void
1796 record_btrace_update_thread_list (struct target_ops *ops)
1797 {
1798 /* We don't add or remove threads during replay. */
1799 if (record_btrace_is_replaying (ops))
1800 return;
1801
1802 /* Forward the request. */
1803 ops = ops->beneath;
1804 ops->to_update_thread_list (ops);
1805 }
1806
1807 /* The to_thread_alive method of target record-btrace. */
1808
1809 static int
1810 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1811 {
1812 /* We don't add or remove threads during replay. */
1813 if (record_btrace_is_replaying (ops))
1814 return find_thread_ptid (ptid) != NULL;
1815
1816 /* Forward the request. */
1817 ops = ops->beneath;
1818 return ops->to_thread_alive (ops, ptid);
1819 }
1820
1821 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
1822 is stopped. */
1823
1824 static void
1825 record_btrace_set_replay (struct thread_info *tp,
1826 const struct btrace_insn_iterator *it)
1827 {
1828 struct btrace_thread_info *btinfo;
1829
1830 btinfo = &tp->btrace;
1831
1832 if (it == NULL || it->function == NULL)
1833 record_btrace_stop_replaying (tp);
1834 else
1835 {
1836 if (btinfo->replay == NULL)
1837 record_btrace_start_replaying (tp);
1838 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1839 return;
1840
1841 *btinfo->replay = *it;
1842 registers_changed_ptid (tp->ptid);
1843 }
1844
1845 /* Start anew from the new replay position. */
1846 record_btrace_clear_histories (btinfo);
1847 }
1848
1849 /* The to_goto_record_begin method of target record-btrace. */
1850
1851 static void
1852 record_btrace_goto_begin (struct target_ops *self)
1853 {
1854 struct thread_info *tp;
1855 struct btrace_insn_iterator begin;
1856
1857 tp = require_btrace_thread ();
1858
1859 btrace_insn_begin (&begin, &tp->btrace);
1860 record_btrace_set_replay (tp, &begin);
1861
1862 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1863 }
1864
1865 /* The to_goto_record_end method of target record-btrace. */
1866
1867 static void
1868 record_btrace_goto_end (struct target_ops *ops)
1869 {
1870 struct thread_info *tp;
1871
1872 tp = require_btrace_thread ();
1873
1874 record_btrace_set_replay (tp, NULL);
1875
1876 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1877 }
1878
1879 /* The to_goto_record method of target record-btrace. */
1880
1881 static void
1882 record_btrace_goto (struct target_ops *self, ULONGEST insn)
1883 {
1884 struct thread_info *tp;
1885 struct btrace_insn_iterator it;
1886 unsigned int number;
1887 int found;
1888
1889 number = insn;
1890
1891 /* Check for wrap-arounds. */
1892 if (number != insn)
1893 error (_("Instruction number out of range."));
1894
1895 tp = require_btrace_thread ();
1896
1897 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1898 if (found == 0)
1899 error (_("No such instruction."));
1900
1901 record_btrace_set_replay (tp, &it);
1902
1903 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1904 }
1905
1906 /* The to_execution_direction target method. */
1907
1908 static enum exec_direction_kind
1909 record_btrace_execution_direction (struct target_ops *self)
1910 {
1911 return record_btrace_resume_exec_dir;
1912 }
1913
1914 /* The to_prepare_to_generate_core target method. */
1915
1916 static void
1917 record_btrace_prepare_to_generate_core (struct target_ops *self)
1918 {
1919 record_btrace_generating_corefile = 1;
1920 }
1921
1922 /* The to_done_generating_core target method. */
1923
1924 static void
1925 record_btrace_done_generating_core (struct target_ops *self)
1926 {
1927 record_btrace_generating_corefile = 0;
1928 }
1929
1930 /* Initialize the record-btrace target ops. */
1931
1932 static void
1933 init_record_btrace_ops (void)
1934 {
1935 struct target_ops *ops;
1936
1937 ops = &record_btrace_ops;
1938 ops->to_shortname = "record-btrace";
1939 ops->to_longname = "Branch tracing target";
1940 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1941 ops->to_open = record_btrace_open;
1942 ops->to_close = record_btrace_close;
1943 ops->to_detach = record_detach;
1944 ops->to_disconnect = record_disconnect;
1945 ops->to_mourn_inferior = record_mourn_inferior;
1946 ops->to_kill = record_kill;
1947 ops->to_stop_recording = record_btrace_stop_recording;
1948 ops->to_info_record = record_btrace_info;
1949 ops->to_insn_history = record_btrace_insn_history;
1950 ops->to_insn_history_from = record_btrace_insn_history_from;
1951 ops->to_insn_history_range = record_btrace_insn_history_range;
1952 ops->to_call_history = record_btrace_call_history;
1953 ops->to_call_history_from = record_btrace_call_history_from;
1954 ops->to_call_history_range = record_btrace_call_history_range;
1955 ops->to_record_is_replaying = record_btrace_is_replaying;
1956 ops->to_xfer_partial = record_btrace_xfer_partial;
1957 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1958 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1959 ops->to_fetch_registers = record_btrace_fetch_registers;
1960 ops->to_store_registers = record_btrace_store_registers;
1961 ops->to_prepare_to_store = record_btrace_prepare_to_store;
1962 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1963 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
1964 ops->to_resume = record_btrace_resume;
1965 ops->to_wait = record_btrace_wait;
1966 ops->to_update_thread_list = record_btrace_update_thread_list;
1967 ops->to_thread_alive = record_btrace_thread_alive;
1968 ops->to_goto_record_begin = record_btrace_goto_begin;
1969 ops->to_goto_record_end = record_btrace_goto_end;
1970 ops->to_goto_record = record_btrace_goto;
1971 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1972 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
1973 ops->to_execution_direction = record_btrace_execution_direction;
1974 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
1975 ops->to_done_generating_core = record_btrace_done_generating_core;
1976 ops->to_stratum = record_stratum;
1977 ops->to_magic = OPS_MAGIC;
1978 }
1979
1980 /* Alias for "target record". */
1981
1982 static void
1983 cmd_record_btrace_start (char *args, int from_tty)
1984 {
1985 if (args != NULL && *args != 0)
1986 error (_("Invalid argument."));
1987
1988 execute_command ("target record-btrace", from_tty);
1989 }
1990
1991 /* The "set record btrace" command. */
1992
1993 static void
1994 cmd_set_record_btrace (char *args, int from_tty)
1995 {
1996 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
1997 }
1998
1999 /* The "show record btrace" command. */
2000
2001 static void
2002 cmd_show_record_btrace (char *args, int from_tty)
2003 {
2004 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2005 }
2006
2007 /* The "show record btrace replay-memory-access" command. */
2008
2009 static void
2010 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2011 struct cmd_list_element *c, const char *value)
2012 {
2013 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2014 replay_memory_access);
2015 }
2016
2017 void _initialize_record_btrace (void);
2018
2019 /* Initialize btrace commands. */
2020
2021 void
2022 _initialize_record_btrace (void)
2023 {
2024 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
2025 _("Start branch trace recording."),
2026 &record_cmdlist);
2027 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
2028
2029 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
2030 _("Set record options"), &set_record_btrace_cmdlist,
2031 "set record btrace ", 0, &set_record_cmdlist);
2032
2033 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
2034 _("Show record options"), &show_record_btrace_cmdlist,
2035 "show record btrace ", 0, &show_record_cmdlist);
2036
2037 add_setshow_enum_cmd ("replay-memory-access", no_class,
2038 replay_memory_access_types, &replay_memory_access, _("\
2039 Set what memory accesses are allowed during replay."), _("\
2040 Show what memory accesses are allowed during replay."),
2041 _("Default is READ-ONLY.\n\n\
2042 The btrace record target does not trace data.\n\
2043 The memory therefore corresponds to the live target and not \
2044 to the current replay position.\n\n\
2045 When READ-ONLY, allow accesses to read-only memory during replay.\n\
2046 When READ-WRITE, allow accesses to read-only and read-write memory during \
2047 replay."),
2048 NULL, cmd_show_replay_memory_access,
2049 &set_record_btrace_cmdlist,
2050 &show_record_btrace_cmdlist);
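
  /* For example, "set record btrace replay-memory-access read-write" permits
     memory writes while replaying.  */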
2051
2052 init_record_btrace_ops ();
2053 add_target (&record_btrace_ops);
2054
2055 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
2056 xcalloc, xfree);
2057 }