1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40 #include "vec.h"
41
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
44
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
47
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
51 static const char *const replay_memory_access_types[] =
52 {
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56 };
57
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access = replay_memory_access_read_only;
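
/* This variable is driven by the "set record btrace replay-memory-access"
   command mentioned above.  For example:

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access

   The default, "read-only", makes record_btrace_xfer_partial below reject
   memory writes while replaying.  */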
60
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
64
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile;
73
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf;
76
77 /* Command list for "record btrace". */
78 static struct cmd_list_element *record_btrace_cmdlist;
79
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91 #define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
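
/* The wrapper matters when the macro is the body of an if/else.  A
   hypothetical use such as

     if (err != 0)
       DEBUG ("enable failed: %d", err);
     else
       do_something ();

   would mis-pair the else with the macro-internal if without the
   do ... while (0).  (ERR and do_something are placeholders.)  */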
99
100
101 /* Update the branch trace for the current thread and return a pointer to its
102 thread_info.
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
107 static struct thread_info *
108 require_btrace_thread (void)
109 {
110 struct thread_info *tp;
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
120 if (btrace_is_empty (tp))
121 error (_("No trace."));
122
123 return tp;
124 }
125
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132 static struct btrace_thread_info *
133 require_btrace (void)
134 {
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
140 }
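
/* Callers can use the result unconditionally, since both helpers throw
   instead of returning NULL.  A minimal sketch of the usual pattern:

     struct btrace_thread_info *btinfo = require_btrace ();
     struct btrace_insn_history *history = btinfo->insn_history;

   This mirrors record_btrace_insn_history below.  */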
141
142 /* Enable branch tracing for one thread. Warn on errors. */
143
144 static void
145 record_btrace_enable_warn (struct thread_info *tp)
146 {
147 TRY
148 {
149 btrace_enable (tp, &record_btrace_conf);
150 }
151 CATCH (error, RETURN_MASK_ERROR)
152 {
153 warning ("%s", error.message);
154 }
155 END_CATCH
156 }
157
158 /* Callback function to disable branch tracing for one thread. */
159
160 static void
161 record_btrace_disable_callback (void *arg)
162 {
163 struct thread_info *tp = (struct thread_info *) arg;
164
165 btrace_disable (tp);
166 }
167
168 /* Enable automatic tracing of new threads. */
169
170 static void
171 record_btrace_auto_enable (void)
172 {
173 DEBUG ("attach thread observer");
174
175 record_btrace_thread_observer
176 = observer_attach_new_thread (record_btrace_enable_warn);
177 }
178
179 /* Disable automatic tracing of new threads. */
180
181 static void
182 record_btrace_auto_disable (void)
183 {
184 /* The observer may already have been detached. */
185 if (record_btrace_thread_observer == NULL)
186 return;
187
188 DEBUG ("detach thread observer");
189
190 observer_detach_new_thread (record_btrace_thread_observer);
191 record_btrace_thread_observer = NULL;
192 }
193
194 /* The record-btrace async event handler function. */
195
196 static void
197 record_btrace_handle_async_inferior_event (gdb_client_data data)
198 {
199 inferior_event_handler (INF_REG_EVENT, NULL);
200 }
201
202 /* The to_open method of target record-btrace. */
203
204 static void
205 record_btrace_open (const char *args, int from_tty)
206 {
207 struct cleanup *disable_chain;
208 struct thread_info *tp;
209 const char *format;
210
211 DEBUG ("open");
212
213 record_preopen ();
214
215 if (!target_has_execution)
216 error (_("The program is not being run."));
217
218 gdb_assert (record_btrace_thread_observer == NULL);
219
220 disable_chain = make_cleanup (null_cleanup, NULL);
221 ALL_NON_EXITED_THREADS (tp)
222 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
223 {
224 btrace_enable (tp, &record_btrace_conf);
225
226 make_cleanup (record_btrace_disable_callback, tp);
227 }
228
229 record_btrace_auto_enable ();
230
231 push_target (&record_btrace_ops);
232
233 record_btrace_async_inferior_event_handler
234 = create_async_event_handler (record_btrace_handle_async_inferior_event,
235 NULL);
236 record_btrace_generating_corefile = 0;
237
238 format = btrace_format_short_string (record_btrace_conf.format);
239 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
240
241 discard_cleanups (disable_chain);
242 }
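
/* Enabling above is transactional: every thread that was successfully
   enabled is registered on DISABLE_CHAIN, so an error thrown by a later
   btrace_enable call disables the earlier threads again while unwinding.
   Only once all threads are enabled is the chain discarded.  */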
243
244 /* The to_stop_recording method of target record-btrace. */
245
246 static void
247 record_btrace_stop_recording (struct target_ops *self)
248 {
249 struct thread_info *tp;
250
251 DEBUG ("stop recording");
252
253 record_btrace_auto_disable ();
254
255 ALL_NON_EXITED_THREADS (tp)
256 if (tp->btrace.target != NULL)
257 btrace_disable (tp);
258 }
259
260 /* The to_close method of target record-btrace. */
261
262 static void
263 record_btrace_close (struct target_ops *self)
264 {
265 struct thread_info *tp;
266
267 if (record_btrace_async_inferior_event_handler != NULL)
268 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
269
270 /* Make sure automatic recording gets disabled even if we did not stop
271 recording before closing the record-btrace target. */
272 record_btrace_auto_disable ();
273
274 /* We should have already stopped recording.
275 Tear down btrace in case we have not. */
276 ALL_NON_EXITED_THREADS (tp)
277 btrace_teardown (tp);
278 }
279
280 /* The to_async method of target record-btrace. */
281
282 static void
283 record_btrace_async (struct target_ops *ops, int enable)
284 {
285 if (enable)
286 mark_async_event_handler (record_btrace_async_inferior_event_handler);
287 else
288 clear_async_event_handler (record_btrace_async_inferior_event_handler);
289
290 ops->beneath->to_async (ops->beneath, enable);
291 }
292
293 /* Adjusts the size and returns a human-readable size suffix. */
294
295 static const char *
296 record_btrace_adjust_size (unsigned int *size)
297 {
298 unsigned int sz;
299
300 sz = *size;
301
302 if ((sz & ((1u << 30) - 1)) == 0)
303 {
304 *size = sz >> 30;
305 return "GB";
306 }
307 else if ((sz & ((1u << 20) - 1)) == 0)
308 {
309 *size = sz >> 20;
310 return "MB";
311 }
312 else if ((sz & ((1u << 10) - 1)) == 0)
313 {
314 *size = sz >> 10;
315 return "kB";
316 }
317 else
318 return "";
319 }
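
/* For example, *SIZE == 2097152 (2 << 20) becomes 2 with suffix "MB",
   *SIZE == 1572864 becomes 1536 with suffix "kB", and a size that is not
   an exact multiple of 1 kB is left unchanged with an empty suffix.  */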
320
321 /* Print a BTS configuration. */
322
323 static void
324 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
325 {
326 const char *suffix;
327 unsigned int size;
328
329 size = conf->size;
330 if (size > 0)
331 {
332 suffix = record_btrace_adjust_size (&size);
333 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
334 }
335 }
336
337 /* Print an Intel Processor Trace configuration. */
338
339 static void
340 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
341 {
342 const char *suffix;
343 unsigned int size;
344
345 size = conf->size;
346 if (size > 0)
347 {
348 suffix = record_btrace_adjust_size (&size);
349 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
350 }
351 }
352
353 /* Print a branch tracing configuration. */
354
355 static void
356 record_btrace_print_conf (const struct btrace_config *conf)
357 {
358 printf_unfiltered (_("Recording format: %s.\n"),
359 btrace_format_string (conf->format));
360
361 switch (conf->format)
362 {
363 case BTRACE_FORMAT_NONE:
364 return;
365
366 case BTRACE_FORMAT_BTS:
367 record_btrace_print_bts_conf (&conf->bts);
368 return;
369
370 case BTRACE_FORMAT_PT:
371 record_btrace_print_pt_conf (&conf->pt);
372 return;
373 }
374
375 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
376 }
377
378 /* The to_info_record method of target record-btrace. */
379
380 static void
381 record_btrace_info (struct target_ops *self)
382 {
383 struct btrace_thread_info *btinfo;
384 const struct btrace_config *conf;
385 struct thread_info *tp;
386 unsigned int insns, calls, gaps;
387
388 DEBUG ("info");
389
390 tp = find_thread_ptid (inferior_ptid);
391 if (tp == NULL)
392 error (_("No thread."));
393
394 btinfo = &tp->btrace;
395
396 conf = btrace_conf (btinfo);
397 if (conf != NULL)
398 record_btrace_print_conf (conf);
399
400 btrace_fetch (tp);
401
402 insns = 0;
403 calls = 0;
404 gaps = 0;
405
406 if (!btrace_is_empty (tp))
407 {
408 struct btrace_call_iterator call;
409 struct btrace_insn_iterator insn;
410
411 btrace_call_end (&call, btinfo);
412 btrace_call_prev (&call, 1);
413 calls = btrace_call_number (&call);
414
415 btrace_insn_end (&insn, btinfo);
416
417 insns = btrace_insn_number (&insn);
418 if (insns != 0)
419 {
420 /* The last instruction does not really belong to the trace. */
421 insns -= 1;
422 }
423 else
424 {
425 unsigned int steps;
426
427 /* Skip gaps at the end. */
428 do
429 {
430 steps = btrace_insn_prev (&insn, 1);
431 if (steps == 0)
432 break;
433
434 insns = btrace_insn_number (&insn);
435 }
436 while (insns == 0);
437 }
438
439 gaps = btinfo->ngaps;
440 }
441
442 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
443 "for thread %s (%s).\n"), insns, calls, gaps,
444 print_thread_id (tp), target_pid_to_str (tp->ptid));
445
446 if (btrace_is_replaying (tp))
447 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
448 btrace_insn_number (btinfo->replay));
449 }
450
451 /* Print a decode error. */
452
453 static void
454 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
455 enum btrace_format format)
456 {
457 const char *errstr;
458 int is_error;
459
460 errstr = _("unknown");
461 is_error = 1;
462
463 switch (format)
464 {
465 default:
466 break;
467
468 case BTRACE_FORMAT_BTS:
469 switch (errcode)
470 {
471 default:
472 break;
473
474 case BDE_BTS_OVERFLOW:
475 errstr = _("instruction overflow");
476 break;
477
478 case BDE_BTS_INSN_SIZE:
479 errstr = _("unknown instruction");
480 break;
481 }
482 break;
483
484 #if defined (HAVE_LIBIPT)
485 case BTRACE_FORMAT_PT:
486 switch (errcode)
487 {
488 case BDE_PT_USER_QUIT:
489 is_error = 0;
490 errstr = _("trace decode cancelled");
491 break;
492
493 case BDE_PT_DISABLED:
494 is_error = 0;
495 errstr = _("disabled");
496 break;
497
498 case BDE_PT_OVERFLOW:
499 is_error = 0;
500 errstr = _("overflow");
501 break;
502
503 default:
504 if (errcode < 0)
505 errstr = pt_errstr (pt_errcode (errcode));
506 break;
507 }
508 break;
509 #endif /* defined (HAVE_LIBIPT) */
510 }
511
512 ui_out_text (uiout, _("["));
513 if (is_error)
514 {
515 ui_out_text (uiout, _("decode error ("));
516 ui_out_field_int (uiout, "errcode", errcode);
517 ui_out_text (uiout, _("): "));
518 }
519 ui_out_text (uiout, errstr);
520 ui_out_text (uiout, _("]\n"));
521 }
522
523 /* Print an unsigned int. */
524
525 static void
526 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
527 {
528 ui_out_field_fmt (uiout, fld, "%u", val);
529 }
530
531 /* A range of source lines. */
532
533 struct btrace_line_range
534 {
535 /* The symtab this line is from. */
536 struct symtab *symtab;
537
538 /* The first line (inclusive). */
539 int begin;
540
541 /* The last line (exclusive). */
542 int end;
543 };
544
545 /* Construct a line range. */
546
547 static struct btrace_line_range
548 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
549 {
550 struct btrace_line_range range;
551
552 range.symtab = symtab;
553 range.begin = begin;
554 range.end = end;
555
556 return range;
557 }
558
559 /* Add a line to a line range. */
560
561 static struct btrace_line_range
562 btrace_line_range_add (struct btrace_line_range range, int line)
563 {
564 if (range.end <= range.begin)
565 {
566 /* This is the first entry. */
567 range.begin = line;
568 range.end = line + 1;
569 }
570 else if (line < range.begin)
571 range.begin = line;
572 else if (range.end <= line)
573 range.end = line + 1;
574
575 return range;
576 }
577
578 /* Return non-zero if RANGE is empty, zero otherwise. */
579
580 static int
581 btrace_line_range_is_empty (struct btrace_line_range range)
582 {
583 return range.end <= range.begin;
584 }
585
586 /* Return non-zero if LHS contains RHS, zero otherwise. */
587
588 static int
589 btrace_line_range_contains_range (struct btrace_line_range lhs,
590 struct btrace_line_range rhs)
591 {
592 return ((lhs.symtab == rhs.symtab)
593 && (lhs.begin <= rhs.begin)
594 && (rhs.end <= lhs.end));
595 }
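
/* Since BEGIN is inclusive and END exclusive, the range [4, 7) covers
   lines 4 through 6: it contains [5, 7) but not [6, 8), and ranges from
   different symtabs never contain one another.  */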
596
597 /* Find the line range associated with PC. */
598
599 static struct btrace_line_range
600 btrace_find_line_range (CORE_ADDR pc)
601 {
602 struct btrace_line_range range;
603 struct linetable_entry *lines;
604 struct linetable *ltable;
605 struct symtab *symtab;
606 int nlines, i;
607
608 symtab = find_pc_line_symtab (pc);
609 if (symtab == NULL)
610 return btrace_mk_line_range (NULL, 0, 0);
611
612 ltable = SYMTAB_LINETABLE (symtab);
613 if (ltable == NULL)
614 return btrace_mk_line_range (symtab, 0, 0);
615
616 nlines = ltable->nitems;
617 lines = ltable->item;
618 if (nlines <= 0)
619 return btrace_mk_line_range (symtab, 0, 0);
620
621 range = btrace_mk_line_range (symtab, 0, 0);
622 for (i = 0; i < nlines - 1; i++)
623 {
624 if ((lines[i].pc == pc) && (lines[i].line != 0))
625 range = btrace_line_range_add (range, lines[i].line);
626 }
627
628 return range;
629 }
630
631 /* Print source lines in LINES to UIOUT.
632
633 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
634 instructions corresponding to that source line. When printing a new source
635 line, we do the cleanups for the open chain and open a new cleanup chain for
636 the new source line. If the source line range in LINES is not empty, this
637 function will leave the cleanup chain for the last printed source line open
638 so instructions can be added to it. */
639
640 static void
641 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
642 struct cleanup **ui_item_chain, int flags)
643 {
644 print_source_lines_flags psl_flags;
645 int line;
646
647 psl_flags = 0;
648 if (flags & DISASSEMBLY_FILENAME)
649 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
650
651 for (line = lines.begin; line < lines.end; ++line)
652 {
653 if (*ui_item_chain != NULL)
654 do_cleanups (*ui_item_chain);
655
656 *ui_item_chain
657 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
658
659 print_source_lines (lines.symtab, line, line + 1, psl_flags);
660
661 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
662 }
663 }
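
/* The resulting ui-out tree is a sequence of "src_and_asm_line" tuples,
   each holding one printed source line plus a "line_asm_insn" list that
   the caller subsequently fills with that line's instructions.  */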
664
665 /* Disassemble a section of the recorded instruction trace. */
666
667 static void
668 btrace_insn_history (struct ui_out *uiout,
669 const struct btrace_thread_info *btinfo,
670 const struct btrace_insn_iterator *begin,
671 const struct btrace_insn_iterator *end, int flags)
672 {
673 struct ui_file *stb;
674 struct cleanup *cleanups, *ui_item_chain;
675 struct disassemble_info di;
676 struct gdbarch *gdbarch;
677 struct btrace_insn_iterator it;
678 struct btrace_line_range last_lines;
679
680 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
681 btrace_insn_number (end));
682
683 flags |= DISASSEMBLY_SPECULATIVE;
684
685 gdbarch = target_gdbarch ();
686 stb = mem_fileopen ();
687 cleanups = make_cleanup_ui_file_delete (stb);
688 di = gdb_disassemble_info (gdbarch, stb);
689 last_lines = btrace_mk_line_range (NULL, 0, 0);
690
691 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
692
693 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
694 instructions corresponding to that line. */
695 ui_item_chain = NULL;
696
697 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
698 {
699 const struct btrace_insn *insn;
700
701 insn = btrace_insn_get (&it);
702
703 /* A NULL instruction indicates a gap in the trace. */
704 if (insn == NULL)
705 {
706 const struct btrace_config *conf;
707
708 conf = btrace_conf (btinfo);
709
710 /* We have trace so we must have a configuration. */
711 gdb_assert (conf != NULL);
712
713 btrace_ui_out_decode_error (uiout, it.function->errcode,
714 conf->format);
715 }
716 else
717 {
718 struct disasm_insn dinsn;
719
720 if ((flags & DISASSEMBLY_SOURCE) != 0)
721 {
722 struct btrace_line_range lines;
723
724 lines = btrace_find_line_range (insn->pc);
725 if (!btrace_line_range_is_empty (lines)
726 && !btrace_line_range_contains_range (last_lines, lines))
727 {
728 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
729 last_lines = lines;
730 }
731 else if (ui_item_chain == NULL)
732 {
733 ui_item_chain
734 = make_cleanup_ui_out_tuple_begin_end (uiout,
735 "src_and_asm_line");
736 /* No source information. */
737 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
738 }
739
740 gdb_assert (ui_item_chain != NULL);
741 }
742
743 memset (&dinsn, 0, sizeof (dinsn));
744 dinsn.number = btrace_insn_number (&it);
745 dinsn.addr = insn->pc;
746
747 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
748 dinsn.is_speculative = 1;
749
750 gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
751 }
752 }
753
754 do_cleanups (cleanups);
755 }
756
757 /* The to_insn_history method of target record-btrace. */
758
759 static void
760 record_btrace_insn_history (struct target_ops *self, int size, int flags)
761 {
762 struct btrace_thread_info *btinfo;
763 struct btrace_insn_history *history;
764 struct btrace_insn_iterator begin, end;
765 struct cleanup *uiout_cleanup;
766 struct ui_out *uiout;
767 unsigned int context, covered;
768
769 uiout = current_uiout;
770 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
771 "insn history");
772 context = abs (size);
773 if (context == 0)
774 error (_("Bad record instruction-history-size."));
775
776 btinfo = require_btrace ();
777 history = btinfo->insn_history;
778 if (history == NULL)
779 {
780 struct btrace_insn_iterator *replay;
781
782 DEBUG ("insn-history (0x%x): %d", flags, size);
783
784 /* If we're replaying, we start at the replay position. Otherwise, we
785 start at the tail of the trace. */
786 replay = btinfo->replay;
787 if (replay != NULL)
788 begin = *replay;
789 else
790 btrace_insn_end (&begin, btinfo);
791
792 /* We start from here and expand in the requested direction. Then we
793 expand in the other direction, as well, to fill up any remaining
794 context. */
795 end = begin;
796 if (size < 0)
797 {
798 /* We want the current position covered, as well. */
799 covered = btrace_insn_next (&end, 1);
800 covered += btrace_insn_prev (&begin, context - covered);
801 covered += btrace_insn_next (&end, context - covered);
802 }
803 else
804 {
805 covered = btrace_insn_next (&end, context);
806 covered += btrace_insn_prev (&begin, context - covered);
807 }
808 }
809 else
810 {
811 begin = history->begin;
812 end = history->end;
813
814 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
815 btrace_insn_number (&begin), btrace_insn_number (&end));
816
817 if (size < 0)
818 {
819 end = begin;
820 covered = btrace_insn_prev (&begin, context);
821 }
822 else
823 {
824 begin = end;
825 covered = btrace_insn_next (&end, context);
826 }
827 }
828
829 if (covered > 0)
830 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
831 else
832 {
833 if (size < 0)
834 printf_unfiltered (_("At the start of the branch trace record.\n"));
835 else
836 printf_unfiltered (_("At the end of the branch trace record.\n"));
837 }
838
839 btrace_set_insn_history (btinfo, &begin, &end);
840 do_cleanups (uiout_cleanup);
841 }
842
843 /* The to_insn_history_range method of target record-btrace. */
844
845 static void
846 record_btrace_insn_history_range (struct target_ops *self,
847 ULONGEST from, ULONGEST to, int flags)
848 {
849 struct btrace_thread_info *btinfo;
850 struct btrace_insn_history *history;
851 struct btrace_insn_iterator begin, end;
852 struct cleanup *uiout_cleanup;
853 struct ui_out *uiout;
854 unsigned int low, high;
855 int found;
856
857 uiout = current_uiout;
858 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
859 "insn history");
860 low = from;
861 high = to;
862
863 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
864
865 /* Check for wrap-arounds. */
866 if (low != from || high != to)
867 error (_("Bad range."));
868
869 if (high < low)
870 error (_("Bad range."));
871
872 btinfo = require_btrace ();
873
874 found = btrace_find_insn_by_number (&begin, btinfo, low);
875 if (found == 0)
876 error (_("Range out of bounds."));
877
878 found = btrace_find_insn_by_number (&end, btinfo, high);
879 if (found == 0)
880 {
881 /* Silently truncate the range. */
882 btrace_insn_end (&end, btinfo);
883 }
884 else
885 {
886 /* We want both begin and end to be inclusive. */
887 btrace_insn_next (&end, 1);
888 }
889
890 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
891 btrace_set_insn_history (btinfo, &begin, &end);
892
893 do_cleanups (uiout_cleanup);
894 }
895
896 /* The to_insn_history_from method of target record-btrace. */
897
898 static void
899 record_btrace_insn_history_from (struct target_ops *self,
900 ULONGEST from, int size, int flags)
901 {
902 ULONGEST begin, end, context;
903
904 context = abs (size);
905 if (context == 0)
906 error (_("Bad record instruction-history-size."));
907
908 if (size < 0)
909 {
910 end = from;
911
912 if (from < context)
913 begin = 0;
914 else
915 begin = from - context + 1;
916 }
917 else
918 {
919 begin = from;
920 end = from + context - 1;
921
922 /* Check for wrap-around. */
923 if (end < begin)
924 end = ULONGEST_MAX;
925 }
926
927 record_btrace_insn_history_range (self, begin, end, flags);
928 }
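
/* For example, FROM == 100 with SIZE == -10 yields the inclusive range
   [91, 100] (END = FROM, BEGIN = FROM - CONTEXT + 1), while SIZE == 10
   yields [100, 109]; both are handed to
   record_btrace_insn_history_range above.  */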
929
930 /* Print the instruction number range for a function call history line. */
931
932 static void
933 btrace_call_history_insn_range (struct ui_out *uiout,
934 const struct btrace_function *bfun)
935 {
936 unsigned int begin, end, size;
937
938 size = VEC_length (btrace_insn_s, bfun->insn);
939 gdb_assert (size > 0);
940
941 begin = bfun->insn_offset;
942 end = begin + size - 1;
943
944 ui_out_field_uint (uiout, "insn begin", begin);
945 ui_out_text (uiout, ",");
946 ui_out_field_uint (uiout, "insn end", end);
947 }
948
949 /* Compute the lowest and highest source line for the instructions in BFUN
950 and return them in PBEGIN and PEND.
951 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
952 result from inlining or macro expansion. */
953
954 static void
955 btrace_compute_src_line_range (const struct btrace_function *bfun,
956 int *pbegin, int *pend)
957 {
958 struct btrace_insn *insn;
959 struct symtab *symtab;
960 struct symbol *sym;
961 unsigned int idx;
962 int begin, end;
963
964 begin = INT_MAX;
965 end = INT_MIN;
966
967 sym = bfun->sym;
968 if (sym == NULL)
969 goto out;
970
971 symtab = symbol_symtab (sym);
972
973 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
974 {
975 struct symtab_and_line sal;
976
977 sal = find_pc_line (insn->pc, 0);
978 if (sal.symtab != symtab || sal.line == 0)
979 continue;
980
981 begin = min (begin, sal.line);
982 end = max (end, sal.line);
983 }
984
985 out:
986 *pbegin = begin;
987 *pend = end;
988 }
989
990 /* Print the source line information for a function call history line. */
991
992 static void
993 btrace_call_history_src_line (struct ui_out *uiout,
994 const struct btrace_function *bfun)
995 {
996 struct symbol *sym;
997 int begin, end;
998
999 sym = bfun->sym;
1000 if (sym == NULL)
1001 return;
1002
1003 ui_out_field_string (uiout, "file",
1004 symtab_to_filename_for_display (symbol_symtab (sym)));
1005
1006 btrace_compute_src_line_range (bfun, &begin, &end);
1007 if (end < begin)
1008 return;
1009
1010 ui_out_text (uiout, ":");
1011 ui_out_field_int (uiout, "min line", begin);
1012
1013 if (end == begin)
1014 return;
1015
1016 ui_out_text (uiout, ",");
1017 ui_out_field_int (uiout, "max line", end);
1018 }
1019
1020 /* Get the name of a branch trace function. */
1021
1022 static const char *
1023 btrace_get_bfun_name (const struct btrace_function *bfun)
1024 {
1025 struct minimal_symbol *msym;
1026 struct symbol *sym;
1027
1028 if (bfun == NULL)
1029 return "??";
1030
1031 msym = bfun->msym;
1032 sym = bfun->sym;
1033
1034 if (sym != NULL)
1035 return SYMBOL_PRINT_NAME (sym);
1036 else if (msym != NULL)
1037 return MSYMBOL_PRINT_NAME (msym);
1038 else
1039 return "??";
1040 }
1041
1042 /* Disassemble a section of the recorded function trace. */
1043
1044 static void
1045 btrace_call_history (struct ui_out *uiout,
1046 const struct btrace_thread_info *btinfo,
1047 const struct btrace_call_iterator *begin,
1048 const struct btrace_call_iterator *end,
1049 int int_flags)
1050 {
1051 struct btrace_call_iterator it;
1052 record_print_flags flags = (enum record_print_flag) int_flags;
1053
1054 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1055 btrace_call_number (end));
1056
1057 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1058 {
1059 const struct btrace_function *bfun;
1060 struct minimal_symbol *msym;
1061 struct symbol *sym;
1062
1063 bfun = btrace_call_get (&it);
1064 sym = bfun->sym;
1065 msym = bfun->msym;
1066
1067 /* Print the function index. */
1068 ui_out_field_uint (uiout, "index", bfun->number);
1069 ui_out_text (uiout, "\t");
1070
1071 /* Indicate gaps in the trace. */
1072 if (bfun->errcode != 0)
1073 {
1074 const struct btrace_config *conf;
1075
1076 conf = btrace_conf (btinfo);
1077
1078 /* We have trace so we must have a configuration. */
1079 gdb_assert (conf != NULL);
1080
1081 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1082
1083 continue;
1084 }
1085
1086 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1087 {
1088 int level = bfun->level + btinfo->level, i;
1089
1090 for (i = 0; i < level; ++i)
1091 ui_out_text (uiout, " ");
1092 }
1093
1094 if (sym != NULL)
1095 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
1096 else if (msym != NULL)
1097 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
1098 else if (!ui_out_is_mi_like_p (uiout))
1099 ui_out_field_string (uiout, "function", "??");
1100
1101 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1102 {
1103 ui_out_text (uiout, _("\tinst "));
1104 btrace_call_history_insn_range (uiout, bfun);
1105 }
1106
1107 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1108 {
1109 ui_out_text (uiout, _("\tat "));
1110 btrace_call_history_src_line (uiout, bfun);
1111 }
1112
1113 ui_out_text (uiout, "\n");
1114 }
1115 }
1116
1117 /* The to_call_history method of target record-btrace. */
1118
1119 static void
1120 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1121 {
1122 struct btrace_thread_info *btinfo;
1123 struct btrace_call_history *history;
1124 struct btrace_call_iterator begin, end;
1125 struct cleanup *uiout_cleanup;
1126 struct ui_out *uiout;
1127 unsigned int context, covered;
1128 record_print_flags flags = (enum record_print_flag) int_flags;
1129
1130 uiout = current_uiout;
1131 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1132 "insn history");
1133 context = abs (size);
1134 if (context == 0)
1135 error (_("Bad record function-call-history-size."));
1136
1137 btinfo = require_btrace ();
1138 history = btinfo->call_history;
1139 if (history == NULL)
1140 {
1141 struct btrace_insn_iterator *replay;
1142
1143 DEBUG ("call-history (0x%x): %d", int_flags, size);
1144
1145 /* If we're replaying, we start at the replay position. Otherwise, we
1146 start at the tail of the trace. */
1147 replay = btinfo->replay;
1148 if (replay != NULL)
1149 {
1150 begin.function = replay->function;
1151 begin.btinfo = btinfo;
1152 }
1153 else
1154 btrace_call_end (&begin, btinfo);
1155
1156 /* We start from here and expand in the requested direction. Then we
1157 expand in the other direction, as well, to fill up any remaining
1158 context. */
1159 end = begin;
1160 if (size < 0)
1161 {
1162 /* We want the current position covered, as well. */
1163 covered = btrace_call_next (&end, 1);
1164 covered += btrace_call_prev (&begin, context - covered);
1165 covered += btrace_call_next (&end, context - covered);
1166 }
1167 else
1168 {
1169 covered = btrace_call_next (&end, context);
1170 covered += btrace_call_prev (&begin, context - covered);
1171 }
1172 }
1173 else
1174 {
1175 begin = history->begin;
1176 end = history->end;
1177
1178 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1179 btrace_call_number (&begin), btrace_call_number (&end));
1180
1181 if (size < 0)
1182 {
1183 end = begin;
1184 covered = btrace_call_prev (&begin, context);
1185 }
1186 else
1187 {
1188 begin = end;
1189 covered = btrace_call_next (&end, context);
1190 }
1191 }
1192
1193 if (covered > 0)
1194 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1195 else
1196 {
1197 if (size < 0)
1198 printf_unfiltered (_("At the start of the branch trace record.\n"));
1199 else
1200 printf_unfiltered (_("At the end of the branch trace record.\n"));
1201 }
1202
1203 btrace_set_call_history (btinfo, &begin, &end);
1204 do_cleanups (uiout_cleanup);
1205 }
1206
1207 /* The to_call_history_range method of target record-btrace. */
1208
1209 static void
1210 record_btrace_call_history_range (struct target_ops *self,
1211 ULONGEST from, ULONGEST to,
1212 int int_flags)
1213 {
1214 struct btrace_thread_info *btinfo;
1215 struct btrace_call_history *history;
1216 struct btrace_call_iterator begin, end;
1217 struct cleanup *uiout_cleanup;
1218 struct ui_out *uiout;
1219 unsigned int low, high;
1220 int found;
1221 record_print_flags flags = (enum record_print_flag) int_flags;
1222
1223 uiout = current_uiout;
1224 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1225 "func history");
1226 low = from;
1227 high = to;
1228
1229 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1230
1231 /* Check for wrap-arounds. */
1232 if (low != from || high != to)
1233 error (_("Bad range."));
1234
1235 if (high < low)
1236 error (_("Bad range."));
1237
1238 btinfo = require_btrace ();
1239
1240 found = btrace_find_call_by_number (&begin, btinfo, low);
1241 if (found == 0)
1242 error (_("Range out of bounds."));
1243
1244 found = btrace_find_call_by_number (&end, btinfo, high);
1245 if (found == 0)
1246 {
1247 /* Silently truncate the range. */
1248 btrace_call_end (&end, btinfo);
1249 }
1250 else
1251 {
1252 /* We want both begin and end to be inclusive. */
1253 btrace_call_next (&end, 1);
1254 }
1255
1256 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1257 btrace_set_call_history (btinfo, &begin, &end);
1258
1259 do_cleanups (uiout_cleanup);
1260 }
1261
1262 /* The to_call_history_from method of target record-btrace. */
1263
1264 static void
1265 record_btrace_call_history_from (struct target_ops *self,
1266 ULONGEST from, int size,
1267 int int_flags)
1268 {
1269 ULONGEST begin, end, context;
1270 record_print_flags flags = (enum record_print_flag) int_flags;
1271
1272 context = abs (size);
1273 if (context == 0)
1274 error (_("Bad record function-call-history-size."));
1275
1276 if (size < 0)
1277 {
1278 end = from;
1279
1280 if (from < context)
1281 begin = 0;
1282 else
1283 begin = from - context + 1;
1284 }
1285 else
1286 {
1287 begin = from;
1288 end = from + context - 1;
1289
1290 /* Check for wrap-around. */
1291 if (end < begin)
1292 end = ULONGEST_MAX;
1293 }
1294
1295 record_btrace_call_history_range (self, begin, end, flags);
1296 }
1297
1298 /* The to_record_is_replaying method of target record-btrace. */
1299
1300 static int
1301 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1302 {
1303 struct thread_info *tp;
1304
1305 ALL_NON_EXITED_THREADS (tp)
1306 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1307 return 1;
1308
1309 return 0;
1310 }
1311
1312 /* The to_record_will_replay method of target record-btrace. */
1313
1314 static int
1315 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1316 {
1317 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1318 }
1319
1320 /* The to_xfer_partial method of target record-btrace. */
1321
1322 static enum target_xfer_status
1323 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1324 const char *annex, gdb_byte *readbuf,
1325 const gdb_byte *writebuf, ULONGEST offset,
1326 ULONGEST len, ULONGEST *xfered_len)
1327 {
1328 struct target_ops *t;
1329
1330 /* Filter out requests that don't make sense during replay. */
1331 if (replay_memory_access == replay_memory_access_read_only
1332 && !record_btrace_generating_corefile
1333 && record_btrace_is_replaying (ops, inferior_ptid))
1334 {
1335 switch (object)
1336 {
1337 case TARGET_OBJECT_MEMORY:
1338 {
1339 struct target_section *section;
1340
1341 /* We do not allow writing memory in general. */
1342 if (writebuf != NULL)
1343 {
1344 *xfered_len = len;
1345 return TARGET_XFER_UNAVAILABLE;
1346 }
1347
1348 /* We allow reading readonly memory. */
1349 section = target_section_by_addr (ops, offset);
1350 if (section != NULL)
1351 {
1352 /* Check if the section we found is readonly. */
1353 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1354 section->the_bfd_section)
1355 & SEC_READONLY) != 0)
1356 {
1357 /* Truncate the request to fit into this section. */
1358 len = min (len, section->endaddr - offset);
1359 break;
1360 }
1361 }
1362
1363 *xfered_len = len;
1364 return TARGET_XFER_UNAVAILABLE;
1365 }
1366 }
1367 }
1368
1369 /* Forward the request. */
1370 ops = ops->beneath;
1371 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1372 offset, len, xfered_len);
1373 }
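
/* In effect, while replaying with the default read-only access, write
   requests fail with TARGET_XFER_UNAVAILABLE without reaching the target
   beneath, and read requests are forwarded (truncated to fit the section)
   only when they hit a SEC_READONLY section such as code or constant
   data.  */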
1374
1375 /* The to_insert_breakpoint method of target record-btrace. */
1376
1377 static int
1378 record_btrace_insert_breakpoint (struct target_ops *ops,
1379 struct gdbarch *gdbarch,
1380 struct bp_target_info *bp_tgt)
1381 {
1382 const char *old;
1383 int ret;
1384
1385 /* Inserting breakpoints requires accessing memory. Allow it for the
1386 duration of this function. */
1387 old = replay_memory_access;
1388 replay_memory_access = replay_memory_access_read_write;
1389
1390 ret = 0;
1391 TRY
1392 {
1393 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1394 }
1395 CATCH (except, RETURN_MASK_ALL)
1396 {
1397 replay_memory_access = old;
1398 throw_exception (except);
1399 }
1400 END_CATCH
1401 replay_memory_access = old;
1402
1403 return ret;
1404 }
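
/* Saving and restoring replay_memory_access around the call (here and in
   record_btrace_remove_breakpoint below) acts as a scoped override: the
   breakpoint code beneath may patch target memory even though replay
   normally forbids writes.  */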
1405
1406 /* The to_remove_breakpoint method of target record-btrace. */
1407
1408 static int
1409 record_btrace_remove_breakpoint (struct target_ops *ops,
1410 struct gdbarch *gdbarch,
1411 struct bp_target_info *bp_tgt)
1412 {
1413 const char *old;
1414 int ret;
1415
1416 /* Removing breakpoints requires accessing memory. Allow it for the
1417 duration of this function. */
1418 old = replay_memory_access;
1419 replay_memory_access = replay_memory_access_read_write;
1420
1421 ret = 0;
1422 TRY
1423 {
1424 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1425 }
1426 CATCH (except, RETURN_MASK_ALL)
1427 {
1428 replay_memory_access = old;
1429 throw_exception (except);
1430 }
1431 END_CATCH
1432 replay_memory_access = old;
1433
1434 return ret;
1435 }
1436
1437 /* The to_fetch_registers method of target record-btrace. */
1438
1439 static void
1440 record_btrace_fetch_registers (struct target_ops *ops,
1441 struct regcache *regcache, int regno)
1442 {
1443 struct btrace_insn_iterator *replay;
1444 struct thread_info *tp;
1445
1446 tp = find_thread_ptid (inferior_ptid);
1447 gdb_assert (tp != NULL);
1448
1449 replay = tp->btrace.replay;
1450 if (replay != NULL && !record_btrace_generating_corefile)
1451 {
1452 const struct btrace_insn *insn;
1453 struct gdbarch *gdbarch;
1454 int pcreg;
1455
1456 gdbarch = get_regcache_arch (regcache);
1457 pcreg = gdbarch_pc_regnum (gdbarch);
1458 if (pcreg < 0)
1459 return;
1460
1461 /* We can only provide the PC register. */
1462 if (regno >= 0 && regno != pcreg)
1463 return;
1464
1465 insn = btrace_insn_get (replay);
1466 gdb_assert (insn != NULL);
1467
1468 regcache_raw_supply (regcache, regno, &insn->pc);
1469 }
1470 else
1471 {
1472 struct target_ops *t = ops->beneath;
1473
1474 t->to_fetch_registers (t, regcache, regno);
1475 }
1476 }
1477
1478 /* The to_store_registers method of target record-btrace. */
1479
1480 static void
1481 record_btrace_store_registers (struct target_ops *ops,
1482 struct regcache *regcache, int regno)
1483 {
1484 struct target_ops *t;
1485
1486 if (!record_btrace_generating_corefile
1487 && record_btrace_is_replaying (ops, inferior_ptid))
1488 error (_("Cannot write registers while replaying."));
1489
1490 gdb_assert (may_write_registers != 0);
1491
1492 t = ops->beneath;
1493 t->to_store_registers (t, regcache, regno);
1494 }
1495
1496 /* The to_prepare_to_store method of target record-btrace. */
1497
1498 static void
1499 record_btrace_prepare_to_store (struct target_ops *ops,
1500 struct regcache *regcache)
1501 {
1502 struct target_ops *t;
1503
1504 if (!record_btrace_generating_corefile
1505 && record_btrace_is_replaying (ops, inferior_ptid))
1506 return;
1507
1508 t = ops->beneath;
1509 t->to_prepare_to_store (t, regcache);
1510 }
1511
1512 /* The branch trace frame cache. */
1513
1514 struct btrace_frame_cache
1515 {
1516 /* The thread. */
1517 struct thread_info *tp;
1518
1519 /* The frame info. */
1520 struct frame_info *frame;
1521
1522 /* The branch trace function segment. */
1523 const struct btrace_function *bfun;
1524 };
1525
1526 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1527
1528 static htab_t bfcache;
1529
1530 /* hash_f for htab_create_alloc of bfcache. */
1531
1532 static hashval_t
1533 bfcache_hash (const void *arg)
1534 {
1535 const struct btrace_frame_cache *cache
1536 = (const struct btrace_frame_cache *) arg;
1537
1538 return htab_hash_pointer (cache->frame);
1539 }
1540
1541 /* eq_f for htab_create_alloc of bfcache. */
1542
1543 static int
1544 bfcache_eq (const void *arg1, const void *arg2)
1545 {
1546 const struct btrace_frame_cache *cache1
1547 = (const struct btrace_frame_cache *) arg1;
1548 const struct btrace_frame_cache *cache2
1549 = (const struct btrace_frame_cache *) arg2;
1550
1551 return cache1->frame == cache2->frame;
1552 }
1553
1554 /* Create a new btrace frame cache. */
1555
1556 static struct btrace_frame_cache *
1557 bfcache_new (struct frame_info *frame)
1558 {
1559 struct btrace_frame_cache *cache;
1560 void **slot;
1561
1562 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1563 cache->frame = frame;
1564
1565 slot = htab_find_slot (bfcache, cache, INSERT);
1566 gdb_assert (*slot == NULL);
1567 *slot = cache;
1568
1569 return cache;
1570 }
1571
1572 /* Extract the branch trace function from a branch trace frame. */
1573
1574 static const struct btrace_function *
1575 btrace_get_frame_function (struct frame_info *frame)
1576 {
1577 const struct btrace_frame_cache *cache;
1578 const struct btrace_function *bfun;
1579 struct btrace_frame_cache pattern;
1580 void **slot;
1581
1582 pattern.frame = frame;
1583
1584 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1585 if (slot == NULL)
1586 return NULL;
1587
1588 cache = (const struct btrace_frame_cache *) *slot;
1589 return cache->bfun;
1590 }
1591
1592 /* Implement stop_reason method for record_btrace_frame_unwind. */
1593
1594 static enum unwind_stop_reason
1595 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1596 void **this_cache)
1597 {
1598 const struct btrace_frame_cache *cache;
1599 const struct btrace_function *bfun;
1600
1601 cache = (const struct btrace_frame_cache *) *this_cache;
1602 bfun = cache->bfun;
1603 gdb_assert (bfun != NULL);
1604
1605 if (bfun->up == NULL)
1606 return UNWIND_UNAVAILABLE;
1607
1608 return UNWIND_NO_REASON;
1609 }
1610
1611 /* Implement this_id method for record_btrace_frame_unwind. */
1612
1613 static void
1614 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1615 struct frame_id *this_id)
1616 {
1617 const struct btrace_frame_cache *cache;
1618 const struct btrace_function *bfun;
1619 CORE_ADDR code, special;
1620
1621 cache = (const struct btrace_frame_cache *) *this_cache;
1622
1623 bfun = cache->bfun;
1624 gdb_assert (bfun != NULL);
1625
1626 while (bfun->segment.prev != NULL)
1627 bfun = bfun->segment.prev;
1628
1629 code = get_frame_func (this_frame);
1630 special = bfun->number;
1631
1632 *this_id = frame_id_build_unavailable_stack_special (code, special);
1633
1634 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1635 btrace_get_bfun_name (cache->bfun),
1636 core_addr_to_string_nz (this_id->code_addr),
1637 core_addr_to_string_nz (this_id->special_addr));
1638 }
1639
1640 /* Implement prev_register method for record_btrace_frame_unwind. */
1641
1642 static struct value *
1643 record_btrace_frame_prev_register (struct frame_info *this_frame,
1644 void **this_cache,
1645 int regnum)
1646 {
1647 const struct btrace_frame_cache *cache;
1648 const struct btrace_function *bfun, *caller;
1649 const struct btrace_insn *insn;
1650 struct gdbarch *gdbarch;
1651 CORE_ADDR pc;
1652 int pcreg;
1653
1654 gdbarch = get_frame_arch (this_frame);
1655 pcreg = gdbarch_pc_regnum (gdbarch);
1656 if (pcreg < 0 || regnum != pcreg)
1657 throw_error (NOT_AVAILABLE_ERROR,
1658 _("Registers are not available in btrace record history"));
1659
1660 cache = (const struct btrace_frame_cache *) *this_cache;
1661 bfun = cache->bfun;
1662 gdb_assert (bfun != NULL);
1663
1664 caller = bfun->up;
1665 if (caller == NULL)
1666 throw_error (NOT_AVAILABLE_ERROR,
1667 _("No caller in btrace record history"));
1668
1669 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1670 {
1671 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1672 pc = insn->pc;
1673 }
1674 else
1675 {
1676 insn = VEC_last (btrace_insn_s, caller->insn);
1677 pc = insn->pc;
1678
1679 pc += gdb_insn_length (gdbarch, pc);
1680 }
1681
1682 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1683 btrace_get_bfun_name (bfun), bfun->level,
1684 core_addr_to_string_nz (pc));
1685
1686 return frame_unwind_got_address (this_frame, regnum, pc);
1687 }
1688
1689 /* Implement sniffer method for record_btrace_frame_unwind. */
1690
1691 static int
1692 record_btrace_frame_sniffer (const struct frame_unwind *self,
1693 struct frame_info *this_frame,
1694 void **this_cache)
1695 {
1696 const struct btrace_function *bfun;
1697 struct btrace_frame_cache *cache;
1698 struct thread_info *tp;
1699 struct frame_info *next;
1700
1701 /* THIS_FRAME does not contain a reference to its thread. */
1702 tp = find_thread_ptid (inferior_ptid);
1703 gdb_assert (tp != NULL);
1704
1705 bfun = NULL;
1706 next = get_next_frame (this_frame);
1707 if (next == NULL)
1708 {
1709 const struct btrace_insn_iterator *replay;
1710
1711 replay = tp->btrace.replay;
1712 if (replay != NULL)
1713 bfun = replay->function;
1714 }
1715 else
1716 {
1717 const struct btrace_function *callee;
1718
1719 callee = btrace_get_frame_function (next);
1720 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1721 bfun = callee->up;
1722 }
1723
1724 if (bfun == NULL)
1725 return 0;
1726
1727 DEBUG ("[frame] sniffed frame for %s on level %d",
1728 btrace_get_bfun_name (bfun), bfun->level);
1729
1730 /* This is our frame. Initialize the frame cache. */
1731 cache = bfcache_new (this_frame);
1732 cache->tp = tp;
1733 cache->bfun = bfun;
1734
1735 *this_cache = cache;
1736 return 1;
1737 }
1738
1739 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1740
1741 static int
1742 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1743 struct frame_info *this_frame,
1744 void **this_cache)
1745 {
1746 const struct btrace_function *bfun, *callee;
1747 struct btrace_frame_cache *cache;
1748 struct frame_info *next;
1749
1750 next = get_next_frame (this_frame);
1751 if (next == NULL)
1752 return 0;
1753
1754 callee = btrace_get_frame_function (next);
1755 if (callee == NULL)
1756 return 0;
1757
1758 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1759 return 0;
1760
1761 bfun = callee->up;
1762 if (bfun == NULL)
1763 return 0;
1764
1765 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1766 btrace_get_bfun_name (bfun), bfun->level);
1767
1768 /* This is our frame. Initialize the frame cache. */
1769 cache = bfcache_new (this_frame);
1770 cache->tp = find_thread_ptid (inferior_ptid);
1771 cache->bfun = bfun;
1772
1773 *this_cache = cache;
1774 return 1;
1775 }
1776
1777 static void
1778 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1779 {
1780 struct btrace_frame_cache *cache;
1781 void **slot;
1782
1783 cache = (struct btrace_frame_cache *) this_cache;
1784
1785 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1786 gdb_assert (slot != NULL);
1787
1788 htab_remove_elt (bfcache, cache);
1789 }
1790
1791 /* btrace recording does not store previous memory content, nor the stack
1792 frames' content. Any unwinding would return erroneous results, as the stack
1793 contents no longer match the changed PC value restored from history.
1794 Therefore this unwinder reports any possibly unwound registers as
1795 <unavailable>. */
1796
1797 const struct frame_unwind record_btrace_frame_unwind =
1798 {
1799 NORMAL_FRAME,
1800 record_btrace_frame_unwind_stop_reason,
1801 record_btrace_frame_this_id,
1802 record_btrace_frame_prev_register,
1803 NULL,
1804 record_btrace_frame_sniffer,
1805 record_btrace_frame_dealloc_cache
1806 };
1807
1808 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1809 {
1810 TAILCALL_FRAME,
1811 record_btrace_frame_unwind_stop_reason,
1812 record_btrace_frame_this_id,
1813 record_btrace_frame_prev_register,
1814 NULL,
1815 record_btrace_tailcall_frame_sniffer,
1816 record_btrace_frame_dealloc_cache
1817 };
1818
1819 /* Implement the to_get_unwinder method. */
1820
1821 static const struct frame_unwind *
1822 record_btrace_to_get_unwinder (struct target_ops *self)
1823 {
1824 return &record_btrace_frame_unwind;
1825 }
1826
1827 /* Implement the to_get_tailcall_unwinder method. */
1828
1829 static const struct frame_unwind *
1830 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1831 {
1832 return &record_btrace_tailcall_frame_unwind;
1833 }
1834
1835 /* Return a human-readable string for FLAG. */
1836
1837 static const char *
1838 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1839 {
1840 switch (flag)
1841 {
1842 case BTHR_STEP:
1843 return "step";
1844
1845 case BTHR_RSTEP:
1846 return "reverse-step";
1847
1848 case BTHR_CONT:
1849 return "cont";
1850
1851 case BTHR_RCONT:
1852 return "reverse-cont";
1853
1854 case BTHR_STOP:
1855 return "stop";
1856 }
1857
1858 return "<invalid>";
1859 }
1860
1861 /* Indicate that TP should be resumed according to FLAG. */
1862
1863 static void
1864 record_btrace_resume_thread (struct thread_info *tp,
1865 enum btrace_thread_flag flag)
1866 {
1867 struct btrace_thread_info *btinfo;
1868
1869 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1870 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1871
1872 btinfo = &tp->btrace;
1873
1874 /* Fetch the latest branch trace. */
1875 btrace_fetch (tp);
1876
1877 /* A resume request overwrites a preceding resume or stop request. */
1878 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1879 btinfo->flags |= flag;
1880 }
1881
1882 /* Get the current frame for TP. */
1883
1884 static struct frame_info *
1885 get_thread_current_frame (struct thread_info *tp)
1886 {
1887 struct frame_info *frame;
1888 ptid_t old_inferior_ptid;
1889 int executing;
1890
1891 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1892 old_inferior_ptid = inferior_ptid;
1893 inferior_ptid = tp->ptid;
1894
1895 /* Clear the executing flag to allow changes to the current frame.
1896 We are not actually running, yet. We just started a reverse execution
1897 command or a record goto command.
1898 For the latter, EXECUTING is false and this has no effect.
1899 For the former, EXECUTING is true and we're in to_wait, about to
1900 move the thread. Since we need to recompute the stack, we temporarily
1901 set EXECUTING to false. */
1902 executing = is_executing (inferior_ptid);
1903 set_executing (inferior_ptid, 0);
1904
1905 frame = NULL;
1906 TRY
1907 {
1908 frame = get_current_frame ();
1909 }
1910 CATCH (except, RETURN_MASK_ALL)
1911 {
1912 /* Restore the previous execution state. */
1913 set_executing (inferior_ptid, executing);
1914
1915 /* Restore the previous inferior_ptid. */
1916 inferior_ptid = old_inferior_ptid;
1917
1918 throw_exception (except);
1919 }
1920 END_CATCH
1921
1922 /* Restore the previous execution state. */
1923 set_executing (inferior_ptid, executing);
1924
1925 /* Restore the previous inferior_ptid. */
1926 inferior_ptid = old_inferior_ptid;
1927
1928 return frame;
1929 }
1930
1931 /* Start replaying a thread. */
1932
1933 static struct btrace_insn_iterator *
1934 record_btrace_start_replaying (struct thread_info *tp)
1935 {
1936 struct btrace_insn_iterator *replay;
1937 struct btrace_thread_info *btinfo;
1938
1939 btinfo = &tp->btrace;
1940 replay = NULL;
1941
1942 /* We can't start replaying without trace. */
1943 if (btinfo->begin == NULL)
1944 return NULL;
1945
1946 /* GDB stores the current frame_id when stepping in order to detect steps
1947 into subroutines.
1948 Since frames are computed differently when we're replaying, we need to
1949 recompute those stored frames and fix them up so we can still detect
1950 subroutines after we started replaying. */
1951 TRY
1952 {
1953 struct frame_info *frame;
1954 struct frame_id frame_id;
1955 int upd_step_frame_id, upd_step_stack_frame_id;
1956
1957 /* The current frame without replaying - computed via normal unwind. */
1958 frame = get_thread_current_frame (tp);
1959 frame_id = get_frame_id (frame);
1960
1961 /* Check if we need to update any stepping-related frame id's. */
1962 upd_step_frame_id = frame_id_eq (frame_id,
1963 tp->control.step_frame_id);
1964 upd_step_stack_frame_id = frame_id_eq (frame_id,
1965 tp->control.step_stack_frame_id);
1966
1967 /* We start replaying at the end of the branch trace. This corresponds
1968 to the current instruction. */
1969 replay = XNEW (struct btrace_insn_iterator);
1970 btrace_insn_end (replay, btinfo);
1971
1972 /* Skip gaps at the end of the trace. */
1973 while (btrace_insn_get (replay) == NULL)
1974 {
1975 unsigned int steps;
1976
1977 steps = btrace_insn_prev (replay, 1);
1978 if (steps == 0)
1979 error (_("No trace."));
1980 }
1981
1982 /* We're not replaying, yet. */
1983 gdb_assert (btinfo->replay == NULL);
1984 btinfo->replay = replay;
1985
1986 /* Make sure we're not using any stale registers. */
1987 registers_changed_ptid (tp->ptid);
1988
1989 /* The current frame with replaying - computed via btrace unwind. */
1990 frame = get_thread_current_frame (tp);
1991 frame_id = get_frame_id (frame);
1992
1993 /* Replace stepping related frames where necessary. */
1994 if (upd_step_frame_id)
1995 tp->control.step_frame_id = frame_id;
1996 if (upd_step_stack_frame_id)
1997 tp->control.step_stack_frame_id = frame_id;
1998 }
1999 CATCH (except, RETURN_MASK_ALL)
2000 {
2001 xfree (btinfo->replay);
2002 btinfo->replay = NULL;
2003
2004 registers_changed_ptid (tp->ptid);
2005
2006 throw_exception (except);
2007 }
2008 END_CATCH
2009
2010 return replay;
2011 }
2012
2013 /* Stop replaying a thread. */
2014
2015 static void
2016 record_btrace_stop_replaying (struct thread_info *tp)
2017 {
2018 struct btrace_thread_info *btinfo;
2019
2020 btinfo = &tp->btrace;
2021
2022 xfree (btinfo->replay);
2023 btinfo->replay = NULL;
2024
2025 /* Make sure we're not leaving any stale registers. */
2026 registers_changed_ptid (tp->ptid);
2027 }
2028
2029 /* Stop replaying TP if it is at the end of its execution history. */
2030
2031 static void
2032 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2033 {
2034 struct btrace_insn_iterator *replay, end;
2035 struct btrace_thread_info *btinfo;
2036
2037 btinfo = &tp->btrace;
2038 replay = btinfo->replay;
2039
2040 if (replay == NULL)
2041 return;
2042
2043 btrace_insn_end (&end, btinfo);
2044
2045 if (btrace_insn_cmp (replay, &end) == 0)
2046 record_btrace_stop_replaying (tp);
2047 }
2048
2049 /* The to_resume method of target record-btrace. */
2050
2051 static void
2052 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2053 enum gdb_signal signal)
2054 {
2055 struct thread_info *tp;
2056 enum btrace_thread_flag flag, cflag;
2057
2058 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2059 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2060 step ? "step" : "cont");
2061
2062 /* Store the execution direction of the last resume.
2063
2064 If there is more than one to_resume call, we have to rely on infrun
2065 to not change the execution direction in-between. */
2066 record_btrace_resume_exec_dir = execution_direction;
2067
2068 /* As long as we're not replaying, just forward the request.
2069
2070 For non-stop targets this means that no thread is replaying. In order to
2071 make progress, we may need to explicitly move replaying threads to the end
2072 of their execution history. */
2073 if ((execution_direction != EXEC_REVERSE)
2074 && !record_btrace_is_replaying (ops, minus_one_ptid))
2075 {
2076 ops = ops->beneath;
2077 ops->to_resume (ops, ptid, step, signal);
2078 return;
2079 }
2080
2081 /* Compute the btrace thread flag for the requested move. */
2082 if (execution_direction == EXEC_REVERSE)
2083 {
2084 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2085 cflag = BTHR_RCONT;
2086 }
2087 else
2088 {
2089 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2090 cflag = BTHR_CONT;
2091 }
2092
2093 /* We just indicate the resume intent here. The actual stepping happens in
2094 record_btrace_wait below.
2095
2096 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2097 if (!target_is_non_stop_p ())
2098 {
2099 gdb_assert (ptid_match (inferior_ptid, ptid));
2100
2101 ALL_NON_EXITED_THREADS (tp)
2102 if (ptid_match (tp->ptid, ptid))
2103 {
2104 if (ptid_match (tp->ptid, inferior_ptid))
2105 record_btrace_resume_thread (tp, flag);
2106 else
2107 record_btrace_resume_thread (tp, cflag);
2108 }
2109 }
2110 else
2111 {
2112 ALL_NON_EXITED_THREADS (tp)
2113 if (ptid_match (tp->ptid, ptid))
2114 record_btrace_resume_thread (tp, flag);
2115 }
2116
2117 /* Async support. */
2118 if (target_can_async_p ())
2119 {
2120 target_async (1);
2121 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2122 }
2123 }
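/* For illustration (editorial addition): the flag computation above maps a
   resume request onto btrace thread flags as follows.

       direction  step   FLAG (resumed thread)   CFLAG (other threads)
       forward    0      BTHR_CONT               BTHR_CONT
       forward    1      BTHR_STEP               BTHR_CONT
       reverse    0      BTHR_RCONT              BTHR_RCONT
       reverse    1      BTHR_RSTEP              BTHR_RCONT

   On all-stop targets only INFERIOR_PTID gets FLAG; all other threads
   matching PTID get CFLAG.  The actual stepping happens later, in
   record_btrace_wait.  */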
2124
2125 /* Cancel resuming TP. */
2126
2127 static void
2128 record_btrace_cancel_resume (struct thread_info *tp)
2129 {
2130 enum btrace_thread_flag flags;
2131
2132 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2133 if (flags == 0)
2134 return;
2135
2136 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2137 print_thread_id (tp),
2138 target_pid_to_str (tp->ptid), flags,
2139 btrace_thread_flag_to_str (flags));
2140
2141 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2142 record_btrace_stop_replaying_at_end (tp);
2143 }
2144
2145 /* Return a target_waitstatus indicating that we ran out of history. */
2146
2147 static struct target_waitstatus
2148 btrace_step_no_history (void)
2149 {
2150 struct target_waitstatus status;
2151
2152 status.kind = TARGET_WAITKIND_NO_HISTORY;
2153
2154 return status;
2155 }
2156
2157 /* Return a target_waitstatus indicating that a step finished. */
2158
2159 static struct target_waitstatus
2160 btrace_step_stopped (void)
2161 {
2162 struct target_waitstatus status;
2163
2164 status.kind = TARGET_WAITKIND_STOPPED;
2165 status.value.sig = GDB_SIGNAL_TRAP;
2166
2167 return status;
2168 }
2169
2170 /* Return a target_waitstatus indicating that a thread was stopped as
2171 requested. */
2172
2173 static struct target_waitstatus
2174 btrace_step_stopped_on_request (void)
2175 {
2176 struct target_waitstatus status;
2177
2178 status.kind = TARGET_WAITKIND_STOPPED;
2179 status.value.sig = GDB_SIGNAL_0;
2180
2181 return status;
2182 }
2183
2184 /* Return a target_waitstatus indicating a spurious stop. */
2185
2186 static struct target_waitstatus
2187 btrace_step_spurious (void)
2188 {
2189 struct target_waitstatus status;
2190
2191 status.kind = TARGET_WAITKIND_SPURIOUS;
2192
2193 return status;
2194 }
2195
2196 /* Return a target_waitstatus indicating that the thread was not resumed. */
2197
2198 static struct target_waitstatus
2199 btrace_step_no_resumed (void)
2200 {
2201 struct target_waitstatus status;
2202
2203 status.kind = TARGET_WAITKIND_NO_RESUMED;
2204
2205 return status;
2206 }
2207
2208 /* Return a target_waitstatus indicating that we should wait again. */
2209
2210 static struct target_waitstatus
2211 btrace_step_again (void)
2212 {
2213 struct target_waitstatus status;
2214
2215 status.kind = TARGET_WAITKIND_IGNORE;
2216
2217 return status;
2218 }
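/* Usage sketch for the constructors above (editorial, illustrative only):
   the stepping routines build one of these statuses and hand it back to
   record_btrace_wait, e.g.

     struct target_waitstatus status = btrace_step_stopped ();
     if (status.kind == TARGET_WAITKIND_STOPPED)
       ...  report a GDB_SIGNAL_TRAP stop for this thread ...

   TARGET_WAITKIND_IGNORE (btrace_step_again) means "no event yet, keep
   stepping", and TARGET_WAITKIND_NO_HISTORY means the thread ran out of
   recorded execution.  */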
2219
2220 /* Clear the record histories. */
2221
2222 static void
2223 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2224 {
2225 xfree (btinfo->insn_history);
2226 xfree (btinfo->call_history);
2227
2228 btinfo->insn_history = NULL;
2229 btinfo->call_history = NULL;
2230 }
2231
2232 /* Check whether TP's current replay position is at a breakpoint. */
2233
2234 static int
2235 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2236 {
2237 struct btrace_insn_iterator *replay;
2238 struct btrace_thread_info *btinfo;
2239 const struct btrace_insn *insn;
2240 struct inferior *inf;
2241
2242 btinfo = &tp->btrace;
2243 replay = btinfo->replay;
2244
2245 if (replay == NULL)
2246 return 0;
2247
2248 insn = btrace_insn_get (replay);
2249 if (insn == NULL)
2250 return 0;
2251
2252 inf = find_inferior_ptid (tp->ptid);
2253 if (inf == NULL)
2254 return 0;
2255
2256 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2257 &btinfo->stop_reason);
2258 }
2259
2260 /* Step one instruction in forward direction. */
2261
2262 static struct target_waitstatus
2263 record_btrace_single_step_forward (struct thread_info *tp)
2264 {
2265 struct btrace_insn_iterator *replay, end;
2266 struct btrace_thread_info *btinfo;
2267
2268 btinfo = &tp->btrace;
2269 replay = btinfo->replay;
2270
2271 /* We're done if we're not replaying. */
2272 if (replay == NULL)
2273 return btrace_step_no_history ();
2274
2275 /* Check if we're stepping a breakpoint. */
2276 if (record_btrace_replay_at_breakpoint (tp))
2277 return btrace_step_stopped ();
2278
2279 /* Skip gaps during replay. */
2280 do
2281 {
2282 unsigned int steps;
2283
2284 /* We will bail out here if we continue stepping after reaching the end
2285 of the execution history. */
2286 steps = btrace_insn_next (replay, 1);
2287 if (steps == 0)
2288 return btrace_step_no_history ();
2289 }
2290 while (btrace_insn_get (replay) == NULL);
2291
2292 /* Determine the end of the instruction trace. */
2293 btrace_insn_end (&end, btinfo);
2294
2295 /* The execution trace contains (and ends with) the current instruction.
2296 This instruction has not been executed yet, so the trace really ends
2297 one instruction earlier. */
2298 if (btrace_insn_cmp (replay, &end) == 0)
2299 return btrace_step_no_history ();
2300
2301 return btrace_step_spurious ();
2302 }
2303
2304 /* Step one instruction in backward direction. */
2305
2306 static struct target_waitstatus
2307 record_btrace_single_step_backward (struct thread_info *tp)
2308 {
2309 struct btrace_insn_iterator *replay;
2310 struct btrace_thread_info *btinfo;
2311
2312 btinfo = &tp->btrace;
2313 replay = btinfo->replay;
2314
2315 /* Start replaying if we're not already doing so. */
2316 if (replay == NULL)
2317 replay = record_btrace_start_replaying (tp);
2318
2319 /* If we can't step any further, we reached the end of the history.
2320 Skip gaps during replay. */
2321 do
2322 {
2323 unsigned int steps;
2324
2325 steps = btrace_insn_prev (replay, 1);
2326 if (steps == 0)
2327 return btrace_step_no_history ();
2328 }
2329 while (btrace_insn_get (replay) == NULL);
2330
2331 /* Check if we're stepping a breakpoint.
2332
2333 For reverse-stepping, this check is after the step. There is logic in
2334 infrun.c that handles reverse-stepping separately. See, for example,
2335 proceed and adjust_pc_after_break.
2336
2337 This code assumes that for reverse-stepping, PC points to the last
2338 de-executed instruction, whereas for forward-stepping PC points to the
2339 next to-be-executed instruction. */
2340 if (record_btrace_replay_at_breakpoint (tp))
2341 return btrace_step_stopped ();
2342
2343 return btrace_step_spurious ();
2344 }
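/* Illustration of the PC convention above (editorial; the instruction
   sequence is made up):

     insn 1:  mov ...
     insn 2:  call ...    <-- breakpoint here
     insn 3:  ret

   Stepping forward from insn 1 stops with PC at insn 2 before it executes,
   so record_btrace_single_step_forward checks for a breakpoint before
   moving.  Stepping backward from insn 3 stops with PC at insn 2 after
   de-executing it, which is why the check above runs after
   btrace_insn_prev.  */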
2345
2346 /* Step a single thread. */
2347
2348 static struct target_waitstatus
2349 record_btrace_step_thread (struct thread_info *tp)
2350 {
2351 struct btrace_thread_info *btinfo;
2352 struct target_waitstatus status;
2353 enum btrace_thread_flag flags;
2354
2355 btinfo = &tp->btrace;
2356
2357 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2358 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2359
2360 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2361 target_pid_to_str (tp->ptid), flags,
2362 btrace_thread_flag_to_str (flags));
2363
2364 /* We can't step without an execution history. */
2365 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2366 return btrace_step_no_history ();
2367
2368 switch (flags)
2369 {
2370 default:
2371 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2372
2373 case BTHR_STOP:
2374 return btrace_step_stopped_on_request ();
2375
2376 case BTHR_STEP:
2377 status = record_btrace_single_step_forward (tp);
2378 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2379 break;
2380
2381 return btrace_step_stopped ();
2382
2383 case BTHR_RSTEP:
2384 status = record_btrace_single_step_backward (tp);
2385 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2386 break;
2387
2388 return btrace_step_stopped ();
2389
2390 case BTHR_CONT:
2391 status = record_btrace_single_step_forward (tp);
2392 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2393 break;
2394
2395 btinfo->flags |= flags;
2396 return btrace_step_again ();
2397
2398 case BTHR_RCONT:
2399 status = record_btrace_single_step_backward (tp);
2400 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2401 break;
2402
2403 btinfo->flags |= flags;
2404 return btrace_step_again ();
2405 }
2406
2407 /* We keep threads moving at the end of their execution history. The to_wait
2408 method will stop the thread for which the event is reported. */
2409 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2410 btinfo->flags |= flags;
2411
2412 return status;
2413 }
2414
2415 /* A vector of threads. */
2416
2417 typedef struct thread_info * tp_t;
2418 DEF_VEC_P (tp_t);
2419
2420 /* Announce further events if necessary. */
2421
2422 static void
2423 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2424 const VEC (tp_t) *no_history)
2425 {
2426 int more_moving, more_no_history;
2427
2428 more_moving = !VEC_empty (tp_t, moving);
2429 more_no_history = !VEC_empty (tp_t, no_history);
2430
2431 if (!more_moving && !more_no_history)
2432 return;
2433
2434 if (more_moving)
2435 DEBUG ("movers pending");
2436
2437 if (more_no_history)
2438 DEBUG ("no-history pending");
2439
2440 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2441 }
2442
2443 /* The to_wait method of target record-btrace. */
2444
2445 static ptid_t
2446 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2447 struct target_waitstatus *status, int options)
2448 {
2449 VEC (tp_t) *moving, *no_history;
2450 struct thread_info *tp, *eventing;
2451 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2452
2453 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2454
2455 /* As long as we're not replaying, just forward the request. */
2456 if ((execution_direction != EXEC_REVERSE)
2457 && !record_btrace_is_replaying (ops, minus_one_ptid))
2458 {
2459 ops = ops->beneath;
2460 return ops->to_wait (ops, ptid, status, options);
2461 }
2462
2463 moving = NULL;
2464 no_history = NULL;
2465
2466 make_cleanup (VEC_cleanup (tp_t), &moving);
2467 make_cleanup (VEC_cleanup (tp_t), &no_history);
2468
2469 /* Keep a work list of moving threads. */
2470 ALL_NON_EXITED_THREADS (tp)
2471 if (ptid_match (tp->ptid, ptid)
2472 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2473 VEC_safe_push (tp_t, moving, tp);
2474
2475 if (VEC_empty (tp_t, moving))
2476 {
2477 *status = btrace_step_no_resumed ();
2478
2479 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2480 target_waitstatus_to_string (status));
2481
2482 do_cleanups (cleanups);
2483 return null_ptid;
2484 }
2485
2486 /* Step moving threads one by one, one step each, until either one thread
2487 reports an event or we run out of threads to step.
2488
2489 When stepping more than one thread, chances are that some threads reach
2490 the end of their execution history earlier than others. If we reported
2491 this immediately, all-stop on top of non-stop would stop all threads and
2492 resume the same threads next time. And we would report the same thread
2493 having reached the end of its execution history again.
2494
2495 In the worst case, this would starve the other threads. But even if other
2496 threads would be allowed to make progress, this would result in far too
2497 many intermediate stops.
2498
2499 We therefore delay the reporting of "no execution history" until we have
2500 nothing else to report. By this time, all threads should have moved to
2501 either the beginning or the end of their execution history. There will
2502 be a single user-visible stop. */
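  /* Worked example (editorial; the threads are hypothetical): suppose
     threads A and B are both moving and A hits the end of its history on
     the first pass while B is still replaying.  A is parked on NO_HISTORY
     and the loop below keeps stepping B alone.  Only when no thread can
     make progress anymore do we take one parked thread and report a single
     TARGET_WAITKIND_NO_HISTORY stop, instead of one stop per thread per
     pass.  */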
2503 eventing = NULL;
2504 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2505 {
2506 unsigned int ix;
2507
2508 ix = 0;
2509 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2510 {
2511 *status = record_btrace_step_thread (tp);
2512
2513 switch (status->kind)
2514 {
2515 case TARGET_WAITKIND_IGNORE:
2516 ix++;
2517 break;
2518
2519 case TARGET_WAITKIND_NO_HISTORY:
2520 VEC_safe_push (tp_t, no_history,
2521 VEC_ordered_remove (tp_t, moving, ix));
2522 break;
2523
2524 default:
2525 eventing = VEC_unordered_remove (tp_t, moving, ix);
2526 break;
2527 }
2528 }
2529 }
2530
2531 if (eventing == NULL)
2532 {
2533 /* We started with at least one moving thread. This thread must have
2534 either stopped or reached the end of its execution history.
2535
2536 In the former case, EVENTING must not be NULL.
2537 In the latter case, NO_HISTORY must not be empty. */
2538 gdb_assert (!VEC_empty (tp_t, no_history));
2539
2540 /* We kept threads moving at the end of their execution history. Stop
2541 EVENTING now that we are going to report its stop. */
2542 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2543 eventing->btrace.flags &= ~BTHR_MOVE;
2544
2545 *status = btrace_step_no_history ();
2546 }
2547
2548 gdb_assert (eventing != NULL);
2549
2550 /* We kept threads replaying at the end of their execution history. Stop
2551 replaying EVENTING now that we are going to report its stop. */
2552 record_btrace_stop_replaying_at_end (eventing);
2553
2554 /* Stop all other threads. */
2555 if (!target_is_non_stop_p ())
2556 ALL_NON_EXITED_THREADS (tp)
2557 record_btrace_cancel_resume (tp);
2558
2559 /* In async mode, we need to announce further events. */
2560 if (target_is_async_p ())
2561 record_btrace_maybe_mark_async_event (moving, no_history);
2562
2563 /* Start record histories anew from the current position. */
2564 record_btrace_clear_histories (&eventing->btrace);
2565
2566 /* We moved the replay position but did not update registers. */
2567 registers_changed_ptid (eventing->ptid);
2568
2569 DEBUG ("wait ended by thread %s (%s): %s",
2570 print_thread_id (eventing),
2571 target_pid_to_str (eventing->ptid),
2572 target_waitstatus_to_string (status));
2573
2574 do_cleanups (cleanups);
2575 return eventing->ptid;
2576 }
2577
2578 /* The to_stop method of target record-btrace. */
2579
2580 static void
2581 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2582 {
2583 DEBUG ("stop %s", target_pid_to_str (ptid));
2584
2585 /* As long as we're not replaying, just forward the request. */
2586 if ((execution_direction != EXEC_REVERSE)
2587 && !record_btrace_is_replaying (ops, minus_one_ptid))
2588 {
2589 ops = ops->beneath;
2590 ops->to_stop (ops, ptid);
2591 }
2592 else
2593 {
2594 struct thread_info *tp;
2595
2596 ALL_NON_EXITED_THREADS (tp)
2597 if (ptid_match (tp->ptid, ptid))
2598 {
2599 tp->btrace.flags &= ~BTHR_MOVE;
2600 tp->btrace.flags |= BTHR_STOP;
2601 }
2602 }
2603 }
2604
2605 /* The to_can_execute_reverse method of target record-btrace. */
2606
2607 static int
2608 record_btrace_can_execute_reverse (struct target_ops *self)
2609 {
2610 return 1;
2611 }
2612
2613 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2614
2615 static int
2616 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2617 {
2618 if (record_btrace_is_replaying (ops, minus_one_ptid))
2619 {
2620 struct thread_info *tp = inferior_thread ();
2621
2622 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2623 }
2624
2625 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2626 }
2627
2628 /* The to_supports_stopped_by_sw_breakpoint method of target
2629 record-btrace. */
2630
2631 static int
2632 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2633 {
2634 if (record_btrace_is_replaying (ops, minus_one_ptid))
2635 return 1;
2636
2637 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2638 }
2639
2640 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2641
2642 static int
2643 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2644 {
2645 if (record_btrace_is_replaying (ops, minus_one_ptid))
2646 {
2647 struct thread_info *tp = inferior_thread ();
2648
2649 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2650 }
2651
2652 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2653 }
2654
2655 /* The to_supports_stopped_by_hw_breakpoint method of target
2656 record-btrace. */
2657
2658 static int
2659 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2660 {
2661 if (record_btrace_is_replaying (ops, minus_one_ptid))
2662 return 1;
2663
2664 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2665 }
2666
2667 /* The to_update_thread_list method of target record-btrace. */
2668
2669 static void
2670 record_btrace_update_thread_list (struct target_ops *ops)
2671 {
2672 /* We don't add or remove threads during replay. */
2673 if (record_btrace_is_replaying (ops, minus_one_ptid))
2674 return;
2675
2676 /* Forward the request. */
2677 ops = ops->beneath;
2678 ops->to_update_thread_list (ops);
2679 }
2680
2681 /* The to_thread_alive method of target record-btrace. */
2682
2683 static int
2684 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2685 {
2686 /* We don't add or remove threads during replay. */
2687 if (record_btrace_is_replaying (ops, minus_one_ptid))
2688 return find_thread_ptid (ptid) != NULL;
2689
2690 /* Forward the request. */
2691 ops = ops->beneath;
2692 return ops->to_thread_alive (ops, ptid);
2693 }
2694
2695 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2696 is stopped. */
2697
2698 static void
2699 record_btrace_set_replay (struct thread_info *tp,
2700 const struct btrace_insn_iterator *it)
2701 {
2702 struct btrace_thread_info *btinfo;
2703
2704 btinfo = &tp->btrace;
2705
2706 if (it == NULL || it->function == NULL)
2707 record_btrace_stop_replaying (tp);
2708 else
2709 {
2710 if (btinfo->replay == NULL)
2711 record_btrace_start_replaying (tp);
2712 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2713 return;
2714
2715 *btinfo->replay = *it;
2716 registers_changed_ptid (tp->ptid);
2717 }
2718
2719 /* Start anew from the new replay position. */
2720 record_btrace_clear_histories (btinfo);
2721
2722 stop_pc = regcache_read_pc (get_current_regcache ());
2723 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2724 }
2725
2726 /* The to_goto_record_begin method of target record-btrace. */
2727
2728 static void
2729 record_btrace_goto_begin (struct target_ops *self)
2730 {
2731 struct thread_info *tp;
2732 struct btrace_insn_iterator begin;
2733
2734 tp = require_btrace_thread ();
2735
2736 btrace_insn_begin (&begin, &tp->btrace);
2737 record_btrace_set_replay (tp, &begin);
2738 }
2739
2740 /* The to_goto_record_end method of target record-btrace. */
2741
2742 static void
2743 record_btrace_goto_end (struct target_ops *ops)
2744 {
2745 struct thread_info *tp;
2746
2747 tp = require_btrace_thread ();
2748
2749 record_btrace_set_replay (tp, NULL);
2750 }
2751
2752 /* The to_goto_record method of target record-btrace. */
2753
2754 static void
2755 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2756 {
2757 struct thread_info *tp;
2758 struct btrace_insn_iterator it;
2759 unsigned int number;
2760 int found;
2761
2762 number = insn;
2763
2764 /* Check for wrap-arounds. */
2765 if (number != insn)
2766 error (_("Instruction number out of range."));
2767
2768 tp = require_btrace_thread ();
2769
2770 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2771 if (found == 0)
2772 error (_("No such instruction."));
2773
2774 record_btrace_set_replay (tp, &it);
2775 }
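/* Usage sketch for the to_goto_record methods above (editorial; the
   instruction number is made up):

     (gdb) record goto begin      <-- record_btrace_goto_begin
     (gdb) record goto 42         <-- record_btrace_goto
     (gdb) record goto end        <-- record_btrace_goto_end
*/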
2776
2777 /* The to_record_stop_replaying method of target record-btrace. */
2778
2779 static void
2780 record_btrace_stop_replaying_all (struct target_ops *self)
2781 {
2782 struct thread_info *tp;
2783
2784 ALL_NON_EXITED_THREADS (tp)
2785 record_btrace_stop_replaying (tp);
2786 }
2787
2788 /* The to_execution_direction target method. */
2789
2790 static enum exec_direction_kind
2791 record_btrace_execution_direction (struct target_ops *self)
2792 {
2793 return record_btrace_resume_exec_dir;
2794 }
2795
2796 /* The to_prepare_to_generate_core target method. */
2797
2798 static void
2799 record_btrace_prepare_to_generate_core (struct target_ops *self)
2800 {
2801 record_btrace_generating_corefile = 1;
2802 }
2803
2804 /* The to_done_generating_core target method. */
2805
2806 static void
2807 record_btrace_done_generating_core (struct target_ops *self)
2808 {
2809 record_btrace_generating_corefile = 0;
2810 }
2811
2812 /* Initialize the record-btrace target ops. */
2813
2814 static void
2815 init_record_btrace_ops (void)
2816 {
2817 struct target_ops *ops;
2818
2819 ops = &record_btrace_ops;
2820 ops->to_shortname = "record-btrace";
2821 ops->to_longname = "Branch tracing target";
2822 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2823 ops->to_open = record_btrace_open;
2824 ops->to_close = record_btrace_close;
2825 ops->to_async = record_btrace_async;
2826 ops->to_detach = record_detach;
2827 ops->to_disconnect = record_disconnect;
2828 ops->to_mourn_inferior = record_mourn_inferior;
2829 ops->to_kill = record_kill;
2830 ops->to_stop_recording = record_btrace_stop_recording;
2831 ops->to_info_record = record_btrace_info;
2832 ops->to_insn_history = record_btrace_insn_history;
2833 ops->to_insn_history_from = record_btrace_insn_history_from;
2834 ops->to_insn_history_range = record_btrace_insn_history_range;
2835 ops->to_call_history = record_btrace_call_history;
2836 ops->to_call_history_from = record_btrace_call_history_from;
2837 ops->to_call_history_range = record_btrace_call_history_range;
2838 ops->to_record_is_replaying = record_btrace_is_replaying;
2839 ops->to_record_will_replay = record_btrace_will_replay;
2840 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2841 ops->to_xfer_partial = record_btrace_xfer_partial;
2842 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2843 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2844 ops->to_fetch_registers = record_btrace_fetch_registers;
2845 ops->to_store_registers = record_btrace_store_registers;
2846 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2847 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2848 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2849 ops->to_resume = record_btrace_resume;
2850 ops->to_wait = record_btrace_wait;
2851 ops->to_stop = record_btrace_stop;
2852 ops->to_update_thread_list = record_btrace_update_thread_list;
2853 ops->to_thread_alive = record_btrace_thread_alive;
2854 ops->to_goto_record_begin = record_btrace_goto_begin;
2855 ops->to_goto_record_end = record_btrace_goto_end;
2856 ops->to_goto_record = record_btrace_goto;
2857 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2858 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2859 ops->to_supports_stopped_by_sw_breakpoint
2860 = record_btrace_supports_stopped_by_sw_breakpoint;
2861 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2862 ops->to_supports_stopped_by_hw_breakpoint
2863 = record_btrace_supports_stopped_by_hw_breakpoint;
2864 ops->to_execution_direction = record_btrace_execution_direction;
2865 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2866 ops->to_done_generating_core = record_btrace_done_generating_core;
2867 ops->to_stratum = record_stratum;
2868 ops->to_magic = OPS_MAGIC;
2869 }
2870
2871 /* Start recording in BTS format. */
2872
2873 static void
2874 cmd_record_btrace_bts_start (char *args, int from_tty)
2875 {
2876 if (args != NULL && *args != 0)
2877 error (_("Invalid argument."));
2878
2879 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2880
2881 TRY
2882 {
2883 execute_command ("target record-btrace", from_tty);
2884 }
2885 CATCH (exception, RETURN_MASK_ALL)
2886 {
2887 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2888 throw_exception (exception);
2889 }
2890 END_CATCH
2891 }
2892
2893 /* Start recording in Intel Processor Trace format. */
2894
2895 static void
2896 cmd_record_btrace_pt_start (char *args, int from_tty)
2897 {
2898 if (args != NULL && *args != 0)
2899 error (_("Invalid argument."));
2900
2901 record_btrace_conf.format = BTRACE_FORMAT_PT;
2902
2903 TRY
2904 {
2905 execute_command ("target record-btrace", from_tty);
2906 }
2907 CATCH (exception, RETURN_MASK_ALL)
2908 {
2909 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2910 throw_exception (exception);
2911 }
2912 END_CATCH
2913 }
2914
2915 /* The "record btrace" command: try Intel Processor Trace first, fall back to BTS. */
2916
2917 static void
2918 cmd_record_btrace_start (char *args, int from_tty)
2919 {
2920 if (args != NULL && *args != 0)
2921 error (_("Invalid argument."));
2922
2923 record_btrace_conf.format = BTRACE_FORMAT_PT;
2924
2925 TRY
2926 {
2927 execute_command ("target record-btrace", from_tty);
2928 }
2929 CATCH (exception, RETURN_MASK_ALL)
2930 {
2931 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2932
2933 TRY
2934 {
2935 execute_command ("target record-btrace", from_tty);
2936 }
2937 CATCH (exception, RETURN_MASK_ALL)
2938 {
2939 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2940 throw_exception (exception);
2941 }
2942 END_CATCH
2943 }
2944 END_CATCH
2945 }
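/* Illustrative sessions for the start commands above (editorial; the
   commands are registered in _initialize_record_btrace below):

     (gdb) record btrace          <-- try pt first, fall back to bts
     (gdb) record btrace bts      <-- force Branch Trace Store format
     (gdb) record btrace pt       <-- force Intel Processor Trace format
*/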
2946
2947 /* The "set record btrace" command. */
2948
2949 static void
2950 cmd_set_record_btrace (char *args, int from_tty)
2951 {
2952 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2953 }
2954
2955 /* The "show record btrace" command. */
2956
2957 static void
2958 cmd_show_record_btrace (char *args, int from_tty)
2959 {
2960 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2961 }
2962
2963 /* The "show record btrace replay-memory-access" command. */
2964
2965 static void
2966 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2967 struct cmd_list_element *c, const char *value)
2968 {
2969 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2970 replay_memory_access);
2971 }
2972
2973 /* The "set record btrace bts" command. */
2974
2975 static void
2976 cmd_set_record_btrace_bts (char *args, int from_tty)
2977 {
2978 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2979 "by an appropriate subcommand.\n"));
2980 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2981 all_commands, gdb_stdout);
2982 }
2983
2984 /* The "show record btrace bts" command. */
2985
2986 static void
2987 cmd_show_record_btrace_bts (char *args, int from_tty)
2988 {
2989 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2990 }
2991
2992 /* The "set record btrace pt" command. */
2993
2994 static void
2995 cmd_set_record_btrace_pt (char *args, int from_tty)
2996 {
2997 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2998 "by an appropriate subcommand.\n"));
2999 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3000 all_commands, gdb_stdout);
3001 }
3002
3003 /* The "show record btrace pt" command. */
3004
3005 static void
3006 cmd_show_record_btrace_pt (char *args, int from_tty)
3007 {
3008 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3009 }
3010
3011 /* The "record bts buffer-size" show value function. */
3012
3013 static void
3014 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3015 struct cmd_list_element *c,
3016 const char *value)
3017 {
3018 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3019 value);
3020 }
3021
3022 /* The "record pt buffer-size" show value function. */
3023
3024 static void
3025 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3026 struct cmd_list_element *c,
3027 const char *value)
3028 {
3029 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3030 value);
3031 }
3032
3033 void _initialize_record_btrace (void);
3034
3035 /* Initialize btrace commands. */
3036
3037 void
3038 _initialize_record_btrace (void)
3039 {
3040 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3041 _("Start branch trace recording."), &record_btrace_cmdlist,
3042 "record btrace ", 0, &record_cmdlist);
3043 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3044
3045 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3046 _("\
3047 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3048 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3049 This format may not be available on all processors."),
3050 &record_btrace_cmdlist);
3051 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3052
3053 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3054 _("\
3055 Start branch trace recording in Intel Processor Trace format.\n\n\
3056 This format may not be available on all processors."),
3057 &record_btrace_cmdlist);
3058 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3059
3060 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3061 _("Set record options"), &set_record_btrace_cmdlist,
3062 "set record btrace ", 0, &set_record_cmdlist);
3063
3064 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3065 _("Show record options"), &show_record_btrace_cmdlist,
3066 "show record btrace ", 0, &show_record_cmdlist);
3067
3068 add_setshow_enum_cmd ("replay-memory-access", no_class,
3069 replay_memory_access_types, &replay_memory_access, _("\
3070 Set what memory accesses are allowed during replay."), _("\
3071 Show what memory accesses are allowed during replay."),
3072 _("Default is READ-ONLY.\n\n\
3073 The btrace record target does not trace data.\n\
3074 The memory therefore corresponds to the live target and not \
3075 to the current replay position.\n\n\
3076 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3077 When READ-WRITE, allow accesses to read-only and read-write memory during \
3078 replay."),
3079 NULL, cmd_show_replay_memory_access,
3080 &set_record_btrace_cmdlist,
3081 &show_record_btrace_cmdlist);
3082
3083 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3084 _("Set record btrace bts options"),
3085 &set_record_btrace_bts_cmdlist,
3086 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3087
3088 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3089 _("Show record btrace bts options"),
3090 &show_record_btrace_bts_cmdlist,
3091 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3092
3093 add_setshow_uinteger_cmd ("buffer-size", no_class,
3094 &record_btrace_conf.bts.size,
3095 _("Set the record/replay bts buffer size."),
3096 _("Show the record/replay bts buffer size."), _("\
3097 When starting recording, request a trace buffer of this size. \
3098 The actual buffer size may differ from the requested size. \
3099 Use \"info record\" to see the actual buffer size.\n\n\
3100 Bigger buffers allow longer recording but also take more time to process \
3101 the recorded execution trace.\n\n\
3102 The trace buffer size may not be changed while recording."), NULL,
3103 show_record_bts_buffer_size_value,
3104 &set_record_btrace_bts_cmdlist,
3105 &show_record_btrace_bts_cmdlist);
3106
3107 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3108 _("Set record btrace pt options"),
3109 &set_record_btrace_pt_cmdlist,
3110 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3111
3112 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3113 _("Show record btrace pt options"),
3114 &show_record_btrace_pt_cmdlist,
3115 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3116
3117 add_setshow_uinteger_cmd ("buffer-size", no_class,
3118 &record_btrace_conf.pt.size,
3119 _("Set the record/replay pt buffer size."),
3120 _("Show the record/replay pt buffer size."), _("\
3121 Bigger buffers allow longer recording but also take more time to process \
3122 the recorded execution.\n\
3123 The actual buffer size may differ from the requested size. Use \"info record\" \
3124 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3125 &set_record_btrace_pt_cmdlist,
3126 &show_record_btrace_pt_cmdlist);
3127
3128 init_record_btrace_ops ();
3129 add_target (&record_btrace_ops);
3130
3131 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3132 xcalloc, xfree);
3133
3134 record_btrace_conf.bts.size = 64 * 1024;
3135 record_btrace_conf.pt.size = 16 * 1024;
3136 }
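/* Usage sketch for the options registered above (editorial; the values are
   illustrative):

     (gdb) set record btrace replay-memory-access read-write
     (gdb) set record btrace bts buffer-size 131072
     (gdb) show record btrace pt buffer-size
*/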