gdb/record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2017 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42 #include <algorithm>
43
44 /* The target_ops of record-btrace. */
45 static struct target_ops record_btrace_ops;
46
47 /* A new thread observer enabling branch tracing for the new thread. */
48 static struct observer *record_btrace_thread_observer;
49
50 /* Memory access types used in set/show record btrace replay-memory-access. */
51 static const char replay_memory_access_read_only[] = "read-only";
52 static const char replay_memory_access_read_write[] = "read-write";
53 static const char *const replay_memory_access_types[] =
54 {
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58 };
59
60 /* The currently allowed replay memory access type. */
61 static const char *replay_memory_access = replay_memory_access_read_only;
62
63 /* Command lists for "set/show record btrace". */
64 static struct cmd_list_element *set_record_btrace_cmdlist;
65 static struct cmd_list_element *show_record_btrace_cmdlist;
66
67 /* The execution direction of the last resume we got. See record-full.c. */
68 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70 /* The async event handler for reverse/replay execution. */
71 static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
73 /* A flag indicating that we are currently generating a core file. */
74 static int record_btrace_generating_corefile;
75
76 /* The current branch trace configuration. */
77 static struct btrace_config record_btrace_conf;
78
79 /* Command list for "record btrace". */
80 static struct cmd_list_element *record_btrace_cmdlist;
81
82 /* Command lists for "set/show record btrace bts". */
83 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
86 /* Command lists for "set/show record btrace pt". */
87 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
90 /* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93 #define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
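/* For illustration: had DEBUG expanded to a bare if statement, then in

     if (from_tty)
       DEBUG ("open");
     else
       ...

   the "else" would bind to the macro's internal "if (record_debug != 0)".
   The do ... while (0) wrapper turns the expansion into a single statement
   that also consumes the trailing semicolon.  */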
102
103 /* Update the branch trace for the current thread and return a pointer to its
104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109 static struct thread_info *
110 require_btrace_thread (void)
111 {
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 validate_registers_access ();
121
122 btrace_fetch (tp);
123
124 if (btrace_is_empty (tp))
125 error (_("No trace."));
126
127 return tp;
128 }
129
130 /* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136 static struct btrace_thread_info *
137 require_btrace (void)
138 {
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
144 }
145
146 /* Enable branch tracing for one thread. Warn on errors. */
147
148 static void
149 record_btrace_enable_warn (struct thread_info *tp)
150 {
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
160 }
161
162 /* Callback function to disable branch tracing for one thread. */
163
164 static void
165 record_btrace_disable_callback (void *arg)
166 {
167 struct thread_info *tp = (struct thread_info *) arg;
168
169 btrace_disable (tp);
170 }
171
172 /* Enable automatic tracing of new threads. */
173
174 static void
175 record_btrace_auto_enable (void)
176 {
177 DEBUG ("attach thread observer");
178
179 record_btrace_thread_observer
180 = observer_attach_new_thread (record_btrace_enable_warn);
181 }
182
183 /* Disable automatic tracing of new threads. */
184
185 static void
186 record_btrace_auto_disable (void)
187 {
188 /* The observer may have been detached, already. */
189 if (record_btrace_thread_observer == NULL)
190 return;
191
192 DEBUG ("detach thread observer");
193
194 observer_detach_new_thread (record_btrace_thread_observer);
195 record_btrace_thread_observer = NULL;
196 }
197
198 /* The record-btrace async event handler function. */
199
200 static void
201 record_btrace_handle_async_inferior_event (gdb_client_data data)
202 {
203 inferior_event_handler (INF_REG_EVENT, NULL);
204 }
205
206 /* See record-btrace.h. */
207
208 void
209 record_btrace_push_target (void)
210 {
211 const char *format;
212
213 record_btrace_auto_enable ();
214
215 push_target (&record_btrace_ops);
216
217 record_btrace_async_inferior_event_handler
218 = create_async_event_handler (record_btrace_handle_async_inferior_event,
219 NULL);
220 record_btrace_generating_corefile = 0;
221
222 format = btrace_format_short_string (record_btrace_conf.format);
223 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
224 }
225
226 /* The to_open method of target record-btrace. */
227
228 static void
229 record_btrace_open (const char *args, int from_tty)
230 {
231 struct cleanup *disable_chain;
232 struct thread_info *tp;
233
234 DEBUG ("open");
235
236 record_preopen ();
237
238 if (!target_has_execution)
239 error (_("The program is not being run."));
240
241 gdb_assert (record_btrace_thread_observer == NULL);
242
243 disable_chain = make_cleanup (null_cleanup, NULL);
244 ALL_NON_EXITED_THREADS (tp)
245 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
246 {
247 btrace_enable (tp, &record_btrace_conf);
248
249 make_cleanup (record_btrace_disable_callback, tp);
250 }
251
252 record_btrace_push_target ();
253
254 discard_cleanups (disable_chain);
255 }
256
257 /* The to_stop_recording method of target record-btrace. */
258
259 static void
260 record_btrace_stop_recording (struct target_ops *self)
261 {
262 struct thread_info *tp;
263
264 DEBUG ("stop recording");
265
266 record_btrace_auto_disable ();
267
268 ALL_NON_EXITED_THREADS (tp)
269 if (tp->btrace.target != NULL)
270 btrace_disable (tp);
271 }
272
273 /* The to_disconnect method of target record-btrace. */
274
275 static void
276 record_btrace_disconnect (struct target_ops *self, const char *args,
277 int from_tty)
278 {
279 struct target_ops *beneath = self->beneath;
280
281 /* Do not stop recording, just clean up GDB side. */
282 unpush_target (self);
283
284 /* Forward disconnect. */
285 beneath->to_disconnect (beneath, args, from_tty);
286 }
287
288 /* The to_close method of target record-btrace. */
289
290 static void
291 record_btrace_close (struct target_ops *self)
292 {
293 struct thread_info *tp;
294
295 if (record_btrace_async_inferior_event_handler != NULL)
296 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
297
298 /* Make sure automatic recording gets disabled even if we did not stop
299 recording before closing the record-btrace target. */
300 record_btrace_auto_disable ();
301
302 /* We should have already stopped recording.
303 Tear down btrace in case we have not. */
304 ALL_NON_EXITED_THREADS (tp)
305 btrace_teardown (tp);
306 }
307
308 /* The to_async method of target record-btrace. */
309
310 static void
311 record_btrace_async (struct target_ops *ops, int enable)
312 {
313 if (enable)
314 mark_async_event_handler (record_btrace_async_inferior_event_handler);
315 else
316 clear_async_event_handler (record_btrace_async_inferior_event_handler);
317
318 ops->beneath->to_async (ops->beneath, enable);
319 }
320
 321 /* Adjust the size and return a human-readable size suffix.  */
322
323 static const char *
324 record_btrace_adjust_size (unsigned int *size)
325 {
326 unsigned int sz;
327
328 sz = *size;
329
330 if ((sz & ((1u << 30) - 1)) == 0)
331 {
332 *size = sz >> 30;
333 return "GB";
334 }
335 else if ((sz & ((1u << 20) - 1)) == 0)
336 {
337 *size = sz >> 20;
338 return "MB";
339 }
340 else if ((sz & ((1u << 10) - 1)) == 0)
341 {
342 *size = sz >> 10;
343 return "kB";
344 }
345 else
346 return "";
347 }
348
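/* For example, a buffer size of 2097152 bytes is a multiple of 1 MiB (the
   low 20 bits are zero), so it is adjusted to 2 with suffix "MB"; 65536
   becomes 64 "kB"; a size that is not a multiple of any of these binary
   units is returned unchanged with an empty suffix.  */
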
349 /* Print a BTS configuration. */
350
351 static void
352 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
353 {
354 const char *suffix;
355 unsigned int size;
356
357 size = conf->size;
358 if (size > 0)
359 {
360 suffix = record_btrace_adjust_size (&size);
361 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
362 }
363 }
364
365 /* Print an Intel Processor Trace configuration. */
366
367 static void
368 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
369 {
370 const char *suffix;
371 unsigned int size;
372
373 size = conf->size;
374 if (size > 0)
375 {
376 suffix = record_btrace_adjust_size (&size);
377 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
378 }
379 }
380
381 /* Print a branch tracing configuration. */
382
383 static void
384 record_btrace_print_conf (const struct btrace_config *conf)
385 {
386 printf_unfiltered (_("Recording format: %s.\n"),
387 btrace_format_string (conf->format));
388
389 switch (conf->format)
390 {
391 case BTRACE_FORMAT_NONE:
392 return;
393
394 case BTRACE_FORMAT_BTS:
395 record_btrace_print_bts_conf (&conf->bts);
396 return;
397
398 case BTRACE_FORMAT_PT:
399 record_btrace_print_pt_conf (&conf->pt);
400 return;
401 }
402
 403   internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
404 }
405
406 /* The to_info_record method of target record-btrace. */
407
408 static void
409 record_btrace_info (struct target_ops *self)
410 {
411 struct btrace_thread_info *btinfo;
412 const struct btrace_config *conf;
413 struct thread_info *tp;
414 unsigned int insns, calls, gaps;
415
416 DEBUG ("info");
417
418 tp = find_thread_ptid (inferior_ptid);
419 if (tp == NULL)
420 error (_("No thread."));
421
422 validate_registers_access ();
423
424 btinfo = &tp->btrace;
425
426 conf = btrace_conf (btinfo);
427 if (conf != NULL)
428 record_btrace_print_conf (conf);
429
430 btrace_fetch (tp);
431
432 insns = 0;
433 calls = 0;
434 gaps = 0;
435
436 if (!btrace_is_empty (tp))
437 {
438 struct btrace_call_iterator call;
439 struct btrace_insn_iterator insn;
440
441 btrace_call_end (&call, btinfo);
442 btrace_call_prev (&call, 1);
443 calls = btrace_call_number (&call);
444
445 btrace_insn_end (&insn, btinfo);
446 insns = btrace_insn_number (&insn);
447
448 /* If the last instruction is not a gap, it is the current instruction
449 that is not actually part of the record. */
450 if (btrace_insn_get (&insn) != NULL)
451 insns -= 1;
452
453 gaps = btinfo->ngaps;
454 }
455
456 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
457 "for thread %s (%s).\n"), insns, calls, gaps,
458 print_thread_id (tp), target_pid_to_str (tp->ptid));
459
460 if (btrace_is_replaying (tp))
461 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
462 btrace_insn_number (btinfo->replay));
463 }
464
465 /* Print a decode error. */
466
467 static void
468 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
469 enum btrace_format format)
470 {
471 const char *errstr = btrace_decode_error (format, errcode);
472
473 uiout->text (_("["));
474 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
475 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
476 {
477 uiout->text (_("decode error ("));
478 uiout->field_int ("errcode", errcode);
479 uiout->text (_("): "));
480 }
481 uiout->text (errstr);
482 uiout->text (_("]\n"));
483 }
484
485 /* Print an unsigned int. */
486
487 static void
488 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
489 {
490 uiout->field_fmt (fld, "%u", val);
491 }
492
493 /* A range of source lines. */
494
495 struct btrace_line_range
496 {
497 /* The symtab this line is from. */
498 struct symtab *symtab;
499
500 /* The first line (inclusive). */
501 int begin;
502
503 /* The last line (exclusive). */
504 int end;
505 };
506
507 /* Construct a line range. */
508
509 static struct btrace_line_range
510 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
511 {
512 struct btrace_line_range range;
513
514 range.symtab = symtab;
515 range.begin = begin;
516 range.end = end;
517
518 return range;
519 }
520
521 /* Add a line to a line range. */
522
523 static struct btrace_line_range
524 btrace_line_range_add (struct btrace_line_range range, int line)
525 {
526 if (range.end <= range.begin)
527 {
528 /* This is the first entry. */
529 range.begin = line;
530 range.end = line + 1;
531 }
532 else if (line < range.begin)
533 range.begin = line;
 534   else if (range.end <= line)
 535     range.end = line + 1;
536
537 return range;
538 }
539
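/* The range is half-open: [begin, end).  For example, adding lines 4, 7,
   and 5 to an empty range yields begin == 4 and end == 8, covering lines
   4 through 7.  */
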
540 /* Return non-zero if RANGE is empty, zero otherwise. */
541
542 static int
543 btrace_line_range_is_empty (struct btrace_line_range range)
544 {
545 return range.end <= range.begin;
546 }
547
548 /* Return non-zero if LHS contains RHS, zero otherwise. */
549
550 static int
551 btrace_line_range_contains_range (struct btrace_line_range lhs,
552 struct btrace_line_range rhs)
553 {
554 return ((lhs.symtab == rhs.symtab)
555 && (lhs.begin <= rhs.begin)
556 && (rhs.end <= lhs.end));
557 }
558
559 /* Find the line range associated with PC. */
560
561 static struct btrace_line_range
562 btrace_find_line_range (CORE_ADDR pc)
563 {
564 struct btrace_line_range range;
565 struct linetable_entry *lines;
566 struct linetable *ltable;
567 struct symtab *symtab;
568 int nlines, i;
569
570 symtab = find_pc_line_symtab (pc);
571 if (symtab == NULL)
572 return btrace_mk_line_range (NULL, 0, 0);
573
574 ltable = SYMTAB_LINETABLE (symtab);
575 if (ltable == NULL)
576 return btrace_mk_line_range (symtab, 0, 0);
577
578 nlines = ltable->nitems;
579 lines = ltable->item;
580 if (nlines <= 0)
581 return btrace_mk_line_range (symtab, 0, 0);
582
583 range = btrace_mk_line_range (symtab, 0, 0);
584 for (i = 0; i < nlines - 1; i++)
585 {
586 if ((lines[i].pc == pc) && (lines[i].line != 0))
587 range = btrace_line_range_add (range, lines[i].line);
588 }
589
590 return range;
591 }
592
593 /* Print source lines in LINES to UIOUT.
594
595 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
596 instructions corresponding to that source line. When printing a new source
597 line, we do the cleanups for the open chain and open a new cleanup chain for
598 the new source line. If the source line range in LINES is not empty, this
599 function will leave the cleanup chain for the last printed source line open
600 so instructions can be added to it. */
601
602 static void
603 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
604 struct cleanup **ui_item_chain, int flags)
605 {
606 print_source_lines_flags psl_flags;
607 int line;
608
609 psl_flags = 0;
610 if (flags & DISASSEMBLY_FILENAME)
611 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
612
613 for (line = lines.begin; line < lines.end; ++line)
614 {
615 if (*ui_item_chain != NULL)
616 do_cleanups (*ui_item_chain);
617
618 *ui_item_chain
619 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
620
621 print_source_lines (lines.symtab, line, line + 1, psl_flags);
622
623 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
624 }
625 }
626
627 /* Disassemble a section of the recorded instruction trace. */
628
629 static void
630 btrace_insn_history (struct ui_out *uiout,
631 const struct btrace_thread_info *btinfo,
632 const struct btrace_insn_iterator *begin,
633 const struct btrace_insn_iterator *end, int flags)
634 {
635 struct cleanup *cleanups, *ui_item_chain;
636 struct gdbarch *gdbarch;
637 struct btrace_insn_iterator it;
638 struct btrace_line_range last_lines;
639
640 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
641 btrace_insn_number (end));
642
643 flags |= DISASSEMBLY_SPECULATIVE;
644
645 gdbarch = target_gdbarch ();
646 last_lines = btrace_mk_line_range (NULL, 0, 0);
647
648 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
649
650 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
651 instructions corresponding to that line. */
652 ui_item_chain = NULL;
653
654 gdb_pretty_print_disassembler disasm (gdbarch);
655
656 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
657 {
658 const struct btrace_insn *insn;
659
660 insn = btrace_insn_get (&it);
661
662 /* A NULL instruction indicates a gap in the trace. */
663 if (insn == NULL)
664 {
665 const struct btrace_config *conf;
666
667 conf = btrace_conf (btinfo);
668
669 /* We have trace so we must have a configuration. */
670 gdb_assert (conf != NULL);
671
672 uiout->field_fmt ("insn-number", "%u",
673 btrace_insn_number (&it));
674 uiout->text ("\t");
675
676 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
677 conf->format);
678 }
679 else
680 {
681 struct disasm_insn dinsn;
682
683 if ((flags & DISASSEMBLY_SOURCE) != 0)
684 {
685 struct btrace_line_range lines;
686
687 lines = btrace_find_line_range (insn->pc);
688 if (!btrace_line_range_is_empty (lines)
689 && !btrace_line_range_contains_range (last_lines, lines))
690 {
691 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
692 last_lines = lines;
693 }
694 else if (ui_item_chain == NULL)
695 {
696 ui_item_chain
697 = make_cleanup_ui_out_tuple_begin_end (uiout,
698 "src_and_asm_line");
699 /* No source information. */
700 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
701 }
702
703 gdb_assert (ui_item_chain != NULL);
704 }
705
706 memset (&dinsn, 0, sizeof (dinsn));
707 dinsn.number = btrace_insn_number (&it);
708 dinsn.addr = insn->pc;
709
710 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
711 dinsn.is_speculative = 1;
712
713 disasm.pretty_print_insn (uiout, &dinsn, flags);
714 }
715 }
716
717 do_cleanups (cleanups);
718 }
719
720 /* The to_insn_history method of target record-btrace. */
721
722 static void
723 record_btrace_insn_history (struct target_ops *self, int size, int flags)
724 {
725 struct btrace_thread_info *btinfo;
726 struct btrace_insn_history *history;
727 struct btrace_insn_iterator begin, end;
728 struct ui_out *uiout;
729 unsigned int context, covered;
730
731 uiout = current_uiout;
732 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
733 context = abs (size);
734 if (context == 0)
735 error (_("Bad record instruction-history-size."));
736
737 btinfo = require_btrace ();
738 history = btinfo->insn_history;
739 if (history == NULL)
740 {
741 struct btrace_insn_iterator *replay;
742
743 DEBUG ("insn-history (0x%x): %d", flags, size);
744
745 /* If we're replaying, we start at the replay position. Otherwise, we
746 start at the tail of the trace. */
747 replay = btinfo->replay;
748 if (replay != NULL)
749 begin = *replay;
750 else
751 btrace_insn_end (&begin, btinfo);
752
753 /* We start from here and expand in the requested direction. Then we
754 expand in the other direction, as well, to fill up any remaining
755 context. */
756 end = begin;
757 if (size < 0)
758 {
759 /* We want the current position covered, as well. */
760 covered = btrace_insn_next (&end, 1);
761 covered += btrace_insn_prev (&begin, context - covered);
762 covered += btrace_insn_next (&end, context - covered);
763 }
764 else
765 {
766 covered = btrace_insn_next (&end, context);
767 covered += btrace_insn_prev (&begin, context - covered);
768 }
769 }
770 else
771 {
772 begin = history->begin;
773 end = history->end;
774
775 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
776 btrace_insn_number (&begin), btrace_insn_number (&end));
777
778 if (size < 0)
779 {
780 end = begin;
781 covered = btrace_insn_prev (&begin, context);
782 }
783 else
784 {
785 begin = end;
786 covered = btrace_insn_next (&end, context);
787 }
788 }
789
790 if (covered > 0)
791 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
792 else
793 {
794 if (size < 0)
795 printf_unfiltered (_("At the start of the branch trace record.\n"));
796 else
797 printf_unfiltered (_("At the end of the branch trace record.\n"));
798 }
799
800 btrace_set_insn_history (btinfo, &begin, &end);
801 }
802
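/* For example, a first "record instruction-history" request with negative
   SIZE typically shows the abs(SIZE) instructions up to and including the
   replay position (or the end of the trace, if not replaying); repeating
   the request pages further in the same direction from the previously
   shown window.  */
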
803 /* The to_insn_history_range method of target record-btrace. */
804
805 static void
806 record_btrace_insn_history_range (struct target_ops *self,
807 ULONGEST from, ULONGEST to, int flags)
808 {
809 struct btrace_thread_info *btinfo;
810 struct btrace_insn_history *history;
811 struct btrace_insn_iterator begin, end;
812 struct ui_out *uiout;
813 unsigned int low, high;
814 int found;
815
816 uiout = current_uiout;
817 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
818 low = from;
819 high = to;
820
821 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
822
823 /* Check for wrap-arounds. */
824 if (low != from || high != to)
825 error (_("Bad range."));
826
827 if (high < low)
828 error (_("Bad range."));
829
830 btinfo = require_btrace ();
831
832 found = btrace_find_insn_by_number (&begin, btinfo, low);
833 if (found == 0)
834 error (_("Range out of bounds."));
835
836 found = btrace_find_insn_by_number (&end, btinfo, high);
837 if (found == 0)
838 {
839 /* Silently truncate the range. */
840 btrace_insn_end (&end, btinfo);
841 }
842 else
843 {
844 /* We want both begin and end to be inclusive. */
845 btrace_insn_next (&end, 1);
846 }
847
848 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
849 btrace_set_insn_history (btinfo, &begin, &end);
850 }
851
852 /* The to_insn_history_from method of target record-btrace. */
853
854 static void
855 record_btrace_insn_history_from (struct target_ops *self,
856 ULONGEST from, int size, int flags)
857 {
858 ULONGEST begin, end, context;
859
860 context = abs (size);
861 if (context == 0)
862 error (_("Bad record instruction-history-size."));
863
864 if (size < 0)
865 {
866 end = from;
867
868 if (from < context)
869 begin = 0;
870 else
871 begin = from - context + 1;
872 }
873 else
874 {
875 begin = from;
876 end = from + context - 1;
877
878 /* Check for wrap-around. */
879 if (end < begin)
880 end = ULONGEST_MAX;
881 }
882
883 record_btrace_insn_history_range (self, begin, end, flags);
884 }
885
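/* For example, FROM == 100 with SIZE == -10 yields the inclusive range
   [91, 100]; FROM == 100 with SIZE == 10 yields [100, 109].  The range
   method above treats both bounds as inclusive.  */
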
886 /* Print the instruction number range for a function call history line. */
887
888 static void
889 btrace_call_history_insn_range (struct ui_out *uiout,
890 const struct btrace_function *bfun)
891 {
892 unsigned int begin, end, size;
893
894 size = VEC_length (btrace_insn_s, bfun->insn);
895 gdb_assert (size > 0);
896
897 begin = bfun->insn_offset;
898 end = begin + size - 1;
899
900 ui_out_field_uint (uiout, "insn begin", begin);
901 uiout->text (",");
902 ui_out_field_uint (uiout, "insn end", end);
903 }
904
905 /* Compute the lowest and highest source line for the instructions in BFUN
906 and return them in PBEGIN and PEND.
907 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
908 result from inlining or macro expansion. */
909
910 static void
911 btrace_compute_src_line_range (const struct btrace_function *bfun,
912 int *pbegin, int *pend)
913 {
914 struct btrace_insn *insn;
915 struct symtab *symtab;
916 struct symbol *sym;
917 unsigned int idx;
918 int begin, end;
919
920 begin = INT_MAX;
921 end = INT_MIN;
922
923 sym = bfun->sym;
924 if (sym == NULL)
925 goto out;
926
927 symtab = symbol_symtab (sym);
928
929 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
930 {
931 struct symtab_and_line sal;
932
933 sal = find_pc_line (insn->pc, 0);
934 if (sal.symtab != symtab || sal.line == 0)
935 continue;
936
937 begin = std::min (begin, sal.line);
938 end = std::max (end, sal.line);
939 }
940
941 out:
942 *pbegin = begin;
943 *pend = end;
944 }
945
946 /* Print the source line information for a function call history line. */
947
948 static void
949 btrace_call_history_src_line (struct ui_out *uiout,
950 const struct btrace_function *bfun)
951 {
952 struct symbol *sym;
953 int begin, end;
954
955 sym = bfun->sym;
956 if (sym == NULL)
957 return;
958
959 uiout->field_string ("file",
960 symtab_to_filename_for_display (symbol_symtab (sym)));
961
962 btrace_compute_src_line_range (bfun, &begin, &end);
963 if (end < begin)
964 return;
965
966 uiout->text (":");
967 uiout->field_int ("min line", begin);
968
969 if (end == begin)
970 return;
971
972 uiout->text (",");
973 uiout->field_int ("max line", end);
974 }
975
976 /* Get the name of a branch trace function. */
977
978 static const char *
979 btrace_get_bfun_name (const struct btrace_function *bfun)
980 {
981 struct minimal_symbol *msym;
982 struct symbol *sym;
983
984 if (bfun == NULL)
985 return "??";
986
987 msym = bfun->msym;
988 sym = bfun->sym;
989
990 if (sym != NULL)
991 return SYMBOL_PRINT_NAME (sym);
992 else if (msym != NULL)
993 return MSYMBOL_PRINT_NAME (msym);
994 else
995 return "??";
996 }
997
998 /* Disassemble a section of the recorded function trace. */
999
1000 static void
1001 btrace_call_history (struct ui_out *uiout,
1002 const struct btrace_thread_info *btinfo,
1003 const struct btrace_call_iterator *begin,
1004 const struct btrace_call_iterator *end,
1005 int int_flags)
1006 {
1007 struct btrace_call_iterator it;
1008 record_print_flags flags = (enum record_print_flag) int_flags;
1009
1010 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1011 btrace_call_number (end));
1012
1013 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1014 {
1015 const struct btrace_function *bfun;
1016 struct minimal_symbol *msym;
1017 struct symbol *sym;
1018
1019 bfun = btrace_call_get (&it);
1020 sym = bfun->sym;
1021 msym = bfun->msym;
1022
1023 /* Print the function index. */
1024 ui_out_field_uint (uiout, "index", bfun->number);
1025 uiout->text ("\t");
1026
1027 /* Indicate gaps in the trace. */
1028 if (bfun->errcode != 0)
1029 {
1030 const struct btrace_config *conf;
1031
1032 conf = btrace_conf (btinfo);
1033
1034 /* We have trace so we must have a configuration. */
1035 gdb_assert (conf != NULL);
1036
1037 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1038
1039 continue;
1040 }
1041
1042 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1043 {
1044 int level = bfun->level + btinfo->level, i;
1045
1046 for (i = 0; i < level; ++i)
1047 uiout->text (" ");
1048 }
1049
1050 if (sym != NULL)
1051 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
1052 else if (msym != NULL)
1053 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1054 else if (!uiout->is_mi_like_p ())
1055 uiout->field_string ("function", "??");
1056
1057 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1058 {
1059 uiout->text (_("\tinst "));
1060 btrace_call_history_insn_range (uiout, bfun);
1061 }
1062
1063 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1064 {
1065 uiout->text (_("\tat "));
1066 btrace_call_history_src_line (uiout, bfun);
1067 }
1068
1069 uiout->text ("\n");
1070 }
1071 }
1072
1073 /* The to_call_history method of target record-btrace. */
1074
1075 static void
1076 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1077 {
1078 struct btrace_thread_info *btinfo;
1079 struct btrace_call_history *history;
1080 struct btrace_call_iterator begin, end;
1081 struct ui_out *uiout;
1082 unsigned int context, covered;
1083 record_print_flags flags = (enum record_print_flag) int_flags;
1084
1085 uiout = current_uiout;
1086   ui_out_emit_tuple tuple_emitter (uiout, "func history");
1087 context = abs (size);
1088 if (context == 0)
1089 error (_("Bad record function-call-history-size."));
1090
1091 btinfo = require_btrace ();
1092 history = btinfo->call_history;
1093 if (history == NULL)
1094 {
1095 struct btrace_insn_iterator *replay;
1096
1097 DEBUG ("call-history (0x%x): %d", int_flags, size);
1098
1099 /* If we're replaying, we start at the replay position. Otherwise, we
1100 start at the tail of the trace. */
1101 replay = btinfo->replay;
1102 if (replay != NULL)
1103 {
1104 begin.btinfo = btinfo;
1105 begin.index = replay->call_index;
1106 }
1107 else
1108 btrace_call_end (&begin, btinfo);
1109
1110 /* We start from here and expand in the requested direction. Then we
1111 expand in the other direction, as well, to fill up any remaining
1112 context. */
1113 end = begin;
1114 if (size < 0)
1115 {
1116 /* We want the current position covered, as well. */
1117 covered = btrace_call_next (&end, 1);
1118 covered += btrace_call_prev (&begin, context - covered);
1119 covered += btrace_call_next (&end, context - covered);
1120 }
1121 else
1122 {
1123 covered = btrace_call_next (&end, context);
1124           covered += btrace_call_prev (&begin, context - covered);
1125 }
1126 }
1127 else
1128 {
1129 begin = history->begin;
1130 end = history->end;
1131
1132 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1133 btrace_call_number (&begin), btrace_call_number (&end));
1134
1135 if (size < 0)
1136 {
1137 end = begin;
1138 covered = btrace_call_prev (&begin, context);
1139 }
1140 else
1141 {
1142 begin = end;
1143 covered = btrace_call_next (&end, context);
1144 }
1145 }
1146
1147 if (covered > 0)
1148 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1149 else
1150 {
1151 if (size < 0)
1152 printf_unfiltered (_("At the start of the branch trace record.\n"));
1153 else
1154 printf_unfiltered (_("At the end of the branch trace record.\n"));
1155 }
1156
1157 btrace_set_call_history (btinfo, &begin, &end);
1158 }
1159
1160 /* The to_call_history_range method of target record-btrace. */
1161
1162 static void
1163 record_btrace_call_history_range (struct target_ops *self,
1164 ULONGEST from, ULONGEST to,
1165 int int_flags)
1166 {
1167 struct btrace_thread_info *btinfo;
1168 struct btrace_call_history *history;
1169 struct btrace_call_iterator begin, end;
1170 struct ui_out *uiout;
1171 unsigned int low, high;
1172 int found;
1173 record_print_flags flags = (enum record_print_flag) int_flags;
1174
1175 uiout = current_uiout;
1176 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1177 low = from;
1178 high = to;
1179
1180 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1181
1182 /* Check for wrap-arounds. */
1183 if (low != from || high != to)
1184 error (_("Bad range."));
1185
1186 if (high < low)
1187 error (_("Bad range."));
1188
1189 btinfo = require_btrace ();
1190
1191 found = btrace_find_call_by_number (&begin, btinfo, low);
1192 if (found == 0)
1193 error (_("Range out of bounds."));
1194
1195 found = btrace_find_call_by_number (&end, btinfo, high);
1196 if (found == 0)
1197 {
1198 /* Silently truncate the range. */
1199 btrace_call_end (&end, btinfo);
1200 }
1201 else
1202 {
1203 /* We want both begin and end to be inclusive. */
1204 btrace_call_next (&end, 1);
1205 }
1206
1207 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1208 btrace_set_call_history (btinfo, &begin, &end);
1209 }
1210
1211 /* The to_call_history_from method of target record-btrace. */
1212
1213 static void
1214 record_btrace_call_history_from (struct target_ops *self,
1215 ULONGEST from, int size,
1216 int int_flags)
1217 {
1218 ULONGEST begin, end, context;
1219 record_print_flags flags = (enum record_print_flag) int_flags;
1220
1221 context = abs (size);
1222 if (context == 0)
1223 error (_("Bad record function-call-history-size."));
1224
1225 if (size < 0)
1226 {
1227 end = from;
1228
1229 if (from < context)
1230 begin = 0;
1231 else
1232 begin = from - context + 1;
1233 }
1234 else
1235 {
1236 begin = from;
1237 end = from + context - 1;
1238
1239 /* Check for wrap-around. */
1240 if (end < begin)
1241 end = ULONGEST_MAX;
1242 }
1243
1244 record_btrace_call_history_range (self, begin, end, flags);
1245 }
1246
1247 /* The to_record_method method of target record-btrace. */
1248
1249 static enum record_method
1250 record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1251 {
1253 struct thread_info * const tp = find_thread_ptid (ptid);
1254
1255 if (tp == NULL)
1256 error (_("No thread."));
1257
1258 if (tp->btrace.target == NULL)
1259 return RECORD_METHOD_NONE;
1260
1261 return RECORD_METHOD_BTRACE;
1262 }
1263
1264 /* The to_record_is_replaying method of target record-btrace. */
1265
1266 static int
1267 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1268 {
1269 struct thread_info *tp;
1270
1271 ALL_NON_EXITED_THREADS (tp)
1272 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1273 return 1;
1274
1275 return 0;
1276 }
1277
1278 /* The to_record_will_replay method of target record-btrace. */
1279
1280 static int
1281 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1282 {
1283 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1284 }
1285
1286 /* The to_xfer_partial method of target record-btrace. */
1287
1288 static enum target_xfer_status
1289 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1290 const char *annex, gdb_byte *readbuf,
1291 const gdb_byte *writebuf, ULONGEST offset,
1292 ULONGEST len, ULONGEST *xfered_len)
1293 {
1296 /* Filter out requests that don't make sense during replay. */
1297 if (replay_memory_access == replay_memory_access_read_only
1298 && !record_btrace_generating_corefile
1299 && record_btrace_is_replaying (ops, inferior_ptid))
1300 {
1301 switch (object)
1302 {
1303 case TARGET_OBJECT_MEMORY:
1304 {
1305 struct target_section *section;
1306
1307 /* We do not allow writing memory in general. */
1308 if (writebuf != NULL)
1309 {
1310 *xfered_len = len;
1311 return TARGET_XFER_UNAVAILABLE;
1312 }
1313
1314 /* We allow reading readonly memory. */
1315 section = target_section_by_addr (ops, offset);
1316 if (section != NULL)
1317 {
1318 /* Check if the section we found is readonly. */
1319 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1320 section->the_bfd_section)
1321 & SEC_READONLY) != 0)
1322 {
1323 /* Truncate the request to fit into this section. */
1324 len = std::min (len, section->endaddr - offset);
1325 break;
1326 }
1327 }
1328
1329 *xfered_len = len;
1330 return TARGET_XFER_UNAVAILABLE;
1331 }
1332 }
1333 }
1334
1335 /* Forward the request. */
1336 ops = ops->beneath;
1337 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1338 offset, len, xfered_len);
1339 }
1340
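/* For example, while replaying with the default "read-only" setting, any
   write to TARGET_OBJECT_MEMORY fails with TARGET_XFER_UNAVAILABLE, and
   reads succeed only for SEC_READONLY sections.  Setting "record btrace
   replay-memory-access" to "read-write" skips this filter and forwards
   all requests to the target beneath.  */
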
1341 /* The to_insert_breakpoint method of target record-btrace. */
1342
1343 static int
1344 record_btrace_insert_breakpoint (struct target_ops *ops,
1345 struct gdbarch *gdbarch,
1346 struct bp_target_info *bp_tgt)
1347 {
1348 const char *old;
1349 int ret;
1350
1351 /* Inserting breakpoints requires accessing memory. Allow it for the
1352 duration of this function. */
1353 old = replay_memory_access;
1354 replay_memory_access = replay_memory_access_read_write;
1355
1356 ret = 0;
1357 TRY
1358 {
1359 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1360 }
1361 CATCH (except, RETURN_MASK_ALL)
1362 {
1363 replay_memory_access = old;
1364 throw_exception (except);
1365 }
1366 END_CATCH
1367 replay_memory_access = old;
1368
1369 return ret;
1370 }
1371
1372 /* The to_remove_breakpoint method of target record-btrace. */
1373
1374 static int
1375 record_btrace_remove_breakpoint (struct target_ops *ops,
1376 struct gdbarch *gdbarch,
1377 struct bp_target_info *bp_tgt,
1378 enum remove_bp_reason reason)
1379 {
1380 const char *old;
1381 int ret;
1382
1383 /* Removing breakpoints requires accessing memory. Allow it for the
1384 duration of this function. */
1385 old = replay_memory_access;
1386 replay_memory_access = replay_memory_access_read_write;
1387
1388 ret = 0;
1389 TRY
1390 {
1391 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1392 reason);
1393 }
1394 CATCH (except, RETURN_MASK_ALL)
1395 {
1396 replay_memory_access = old;
1397 throw_exception (except);
1398 }
1399 END_CATCH
1400 replay_memory_access = old;
1401
1402 return ret;
1403 }
1404
1405 /* The to_fetch_registers method of target record-btrace. */
1406
1407 static void
1408 record_btrace_fetch_registers (struct target_ops *ops,
1409 struct regcache *regcache, int regno)
1410 {
1411 struct btrace_insn_iterator *replay;
1412 struct thread_info *tp;
1413
1414 tp = find_thread_ptid (regcache_get_ptid (regcache));
1415 gdb_assert (tp != NULL);
1416
1417 replay = tp->btrace.replay;
1418 if (replay != NULL && !record_btrace_generating_corefile)
1419 {
1420 const struct btrace_insn *insn;
1421 struct gdbarch *gdbarch;
1422 int pcreg;
1423
1424 gdbarch = get_regcache_arch (regcache);
1425 pcreg = gdbarch_pc_regnum (gdbarch);
1426 if (pcreg < 0)
1427 return;
1428
1429 /* We can only provide the PC register. */
1430 if (regno >= 0 && regno != pcreg)
1431 return;
1432
1433 insn = btrace_insn_get (replay);
1434 gdb_assert (insn != NULL);
1435
1436 regcache_raw_supply (regcache, regno, &insn->pc);
1437 }
1438 else
1439 {
1440 struct target_ops *t = ops->beneath;
1441
1442 t->to_fetch_registers (t, regcache, regno);
1443 }
1444 }
1445
1446 /* The to_store_registers method of target record-btrace. */
1447
1448 static void
1449 record_btrace_store_registers (struct target_ops *ops,
1450 struct regcache *regcache, int regno)
1451 {
1452 struct target_ops *t;
1453
1454 if (!record_btrace_generating_corefile
1455 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1456 error (_("Cannot write registers while replaying."));
1457
1458 gdb_assert (may_write_registers != 0);
1459
1460 t = ops->beneath;
1461 t->to_store_registers (t, regcache, regno);
1462 }
1463
1464 /* The to_prepare_to_store method of target record-btrace. */
1465
1466 static void
1467 record_btrace_prepare_to_store (struct target_ops *ops,
1468 struct regcache *regcache)
1469 {
1470 struct target_ops *t;
1471
1472 if (!record_btrace_generating_corefile
1473 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1474 return;
1475
1476 t = ops->beneath;
1477 t->to_prepare_to_store (t, regcache);
1478 }
1479
1480 /* The branch trace frame cache. */
1481
1482 struct btrace_frame_cache
1483 {
1484 /* The thread. */
1485 struct thread_info *tp;
1486
1487 /* The frame info. */
1488 struct frame_info *frame;
1489
1490 /* The branch trace function segment. */
1491 const struct btrace_function *bfun;
1492 };
1493
1494 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1495
1496 static htab_t bfcache;
1497
1498 /* hash_f for htab_create_alloc of bfcache. */
1499
1500 static hashval_t
1501 bfcache_hash (const void *arg)
1502 {
1503 const struct btrace_frame_cache *cache
1504 = (const struct btrace_frame_cache *) arg;
1505
1506 return htab_hash_pointer (cache->frame);
1507 }
1508
1509 /* eq_f for htab_create_alloc of bfcache. */
1510
1511 static int
1512 bfcache_eq (const void *arg1, const void *arg2)
1513 {
1514 const struct btrace_frame_cache *cache1
1515 = (const struct btrace_frame_cache *) arg1;
1516 const struct btrace_frame_cache *cache2
1517 = (const struct btrace_frame_cache *) arg2;
1518
1519 return cache1->frame == cache2->frame;
1520 }
1521
1522 /* Create a new btrace frame cache. */
1523
1524 static struct btrace_frame_cache *
1525 bfcache_new (struct frame_info *frame)
1526 {
1527 struct btrace_frame_cache *cache;
1528 void **slot;
1529
1530 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1531 cache->frame = frame;
1532
1533 slot = htab_find_slot (bfcache, cache, INSERT);
1534 gdb_assert (*slot == NULL);
1535 *slot = cache;
1536
1537 return cache;
1538 }
1539
1540 /* Extract the branch trace function from a branch trace frame. */
1541
1542 static const struct btrace_function *
1543 btrace_get_frame_function (struct frame_info *frame)
1544 {
1545 const struct btrace_frame_cache *cache;
1547 struct btrace_frame_cache pattern;
1548 void **slot;
1549
1550 pattern.frame = frame;
1551
1552 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1553 if (slot == NULL)
1554 return NULL;
1555
1556 cache = (const struct btrace_frame_cache *) *slot;
1557 return cache->bfun;
1558 }
1559
1560 /* Implement stop_reason method for record_btrace_frame_unwind. */
1561
1562 static enum unwind_stop_reason
1563 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1564 void **this_cache)
1565 {
1566 const struct btrace_frame_cache *cache;
1567 const struct btrace_function *bfun;
1568
1569 cache = (const struct btrace_frame_cache *) *this_cache;
1570 bfun = cache->bfun;
1571 gdb_assert (bfun != NULL);
1572
1573 if (bfun->up == NULL)
1574 return UNWIND_UNAVAILABLE;
1575
1576 return UNWIND_NO_REASON;
1577 }
1578
1579 /* Implement this_id method for record_btrace_frame_unwind. */
1580
1581 static void
1582 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1583 struct frame_id *this_id)
1584 {
1585 const struct btrace_frame_cache *cache;
1586 const struct btrace_function *bfun;
1587 CORE_ADDR code, special;
1588
1589 cache = (const struct btrace_frame_cache *) *this_cache;
1590
1591 bfun = cache->bfun;
1592 gdb_assert (bfun != NULL);
1593
1594 while (bfun->segment.prev != NULL)
1595 bfun = bfun->segment.prev;
1596
1597 code = get_frame_func (this_frame);
1598 special = bfun->number;
1599
1600 *this_id = frame_id_build_unavailable_stack_special (code, special);
1601
1602 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1603 btrace_get_bfun_name (cache->bfun),
1604 core_addr_to_string_nz (this_id->code_addr),
1605 core_addr_to_string_nz (this_id->special_addr));
1606 }
1607
1608 /* Implement prev_register method for record_btrace_frame_unwind. */
1609
1610 static struct value *
1611 record_btrace_frame_prev_register (struct frame_info *this_frame,
1612 void **this_cache,
1613 int regnum)
1614 {
1615 const struct btrace_frame_cache *cache;
1616 const struct btrace_function *bfun, *caller;
1617 const struct btrace_insn *insn;
1618 struct gdbarch *gdbarch;
1619 CORE_ADDR pc;
1620 int pcreg;
1621
1622 gdbarch = get_frame_arch (this_frame);
1623 pcreg = gdbarch_pc_regnum (gdbarch);
1624 if (pcreg < 0 || regnum != pcreg)
1625 throw_error (NOT_AVAILABLE_ERROR,
1626 _("Registers are not available in btrace record history"));
1627
1628 cache = (const struct btrace_frame_cache *) *this_cache;
1629 bfun = cache->bfun;
1630 gdb_assert (bfun != NULL);
1631
1632 caller = bfun->up;
1633 if (caller == NULL)
1634 throw_error (NOT_AVAILABLE_ERROR,
1635 _("No caller in btrace record history"));
1636
1637 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1638 {
1639 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1640 pc = insn->pc;
1641 }
1642 else
1643 {
1644 insn = VEC_last (btrace_insn_s, caller->insn);
1645 pc = insn->pc;
1646
1647 pc += gdb_insn_length (gdbarch, pc);
1648 }
1649
1650 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1651 btrace_get_bfun_name (bfun), bfun->level,
1652 core_addr_to_string_nz (pc));
1653
1654 return frame_unwind_got_address (this_frame, regnum, pc);
1655 }
1656
1657 /* Implement sniffer method for record_btrace_frame_unwind. */
1658
1659 static int
1660 record_btrace_frame_sniffer (const struct frame_unwind *self,
1661 struct frame_info *this_frame,
1662 void **this_cache)
1663 {
1664 const struct btrace_function *bfun;
1665 struct btrace_frame_cache *cache;
1666 struct thread_info *tp;
1667 struct frame_info *next;
1668
1669 /* THIS_FRAME does not contain a reference to its thread. */
1670 tp = find_thread_ptid (inferior_ptid);
1671 gdb_assert (tp != NULL);
1672
1673 bfun = NULL;
1674 next = get_next_frame (this_frame);
1675 if (next == NULL)
1676 {
1677 const struct btrace_insn_iterator *replay;
1678
1679 replay = tp->btrace.replay;
1680 if (replay != NULL)
1681 bfun = replay->btinfo->functions[replay->call_index];
1682 }
1683 else
1684 {
1685 const struct btrace_function *callee;
1686
1687 callee = btrace_get_frame_function (next);
1688 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1689 bfun = callee->up;
1690 }
1691
1692 if (bfun == NULL)
1693 return 0;
1694
1695 DEBUG ("[frame] sniffed frame for %s on level %d",
1696 btrace_get_bfun_name (bfun), bfun->level);
1697
1698 /* This is our frame. Initialize the frame cache. */
1699 cache = bfcache_new (this_frame);
1700 cache->tp = tp;
1701 cache->bfun = bfun;
1702
1703 *this_cache = cache;
1704 return 1;
1705 }
1706
1707 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1708
1709 static int
1710 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1711 struct frame_info *this_frame,
1712 void **this_cache)
1713 {
1714 const struct btrace_function *bfun, *callee;
1715 struct btrace_frame_cache *cache;
1716 struct frame_info *next;
1717
1718 next = get_next_frame (this_frame);
1719 if (next == NULL)
1720 return 0;
1721
1722 callee = btrace_get_frame_function (next);
1723 if (callee == NULL)
1724 return 0;
1725
1726 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1727 return 0;
1728
1729 bfun = callee->up;
1730 if (bfun == NULL)
1731 return 0;
1732
1733 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1734 btrace_get_bfun_name (bfun), bfun->level);
1735
1736 /* This is our frame. Initialize the frame cache. */
1737 cache = bfcache_new (this_frame);
1738 cache->tp = find_thread_ptid (inferior_ptid);
1739 cache->bfun = bfun;
1740
1741 *this_cache = cache;
1742 return 1;
1743 }
1744
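/* Implement the dealloc_cache method for record_btrace_frame_unwind.  */
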
1745 static void
1746 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1747 {
1748 struct btrace_frame_cache *cache;
1749 void **slot;
1750
1751 cache = (struct btrace_frame_cache *) this_cache;
1752
1753 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1754 gdb_assert (slot != NULL);
1755
1756 htab_remove_elt (bfcache, cache);
1757 }
1758
1759 /* Branch trace recording does not store previous memory contents, nor the
1760    contents of stack frames.  Any unwinding would return erroneous results as
1761    the stack contents no longer match the changed PC value restored from
1762    history.  Therefore this unwinder reports any possibly unwound registers as
1763    <unavailable>.  */
1764
1765 const struct frame_unwind record_btrace_frame_unwind =
1766 {
1767 NORMAL_FRAME,
1768 record_btrace_frame_unwind_stop_reason,
1769 record_btrace_frame_this_id,
1770 record_btrace_frame_prev_register,
1771 NULL,
1772 record_btrace_frame_sniffer,
1773 record_btrace_frame_dealloc_cache
1774 };
1775
1776 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1777 {
1778 TAILCALL_FRAME,
1779 record_btrace_frame_unwind_stop_reason,
1780 record_btrace_frame_this_id,
1781 record_btrace_frame_prev_register,
1782 NULL,
1783 record_btrace_tailcall_frame_sniffer,
1784 record_btrace_frame_dealloc_cache
1785 };
1786
1787 /* Implement the to_get_unwinder method. */
1788
1789 static const struct frame_unwind *
1790 record_btrace_to_get_unwinder (struct target_ops *self)
1791 {
1792 return &record_btrace_frame_unwind;
1793 }
1794
1795 /* Implement the to_get_tailcall_unwinder method. */
1796
1797 static const struct frame_unwind *
1798 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1799 {
1800 return &record_btrace_tailcall_frame_unwind;
1801 }
1802
1803 /* Return a human-readable string for FLAG. */
1804
1805 static const char *
1806 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1807 {
1808 switch (flag)
1809 {
1810 case BTHR_STEP:
1811 return "step";
1812
1813 case BTHR_RSTEP:
1814 return "reverse-step";
1815
1816 case BTHR_CONT:
1817 return "cont";
1818
1819 case BTHR_RCONT:
1820 return "reverse-cont";
1821
1822 case BTHR_STOP:
1823 return "stop";
1824 }
1825
1826 return "<invalid>";
1827 }
1828
1829 /* Indicate that TP should be resumed according to FLAG. */
1830
1831 static void
1832 record_btrace_resume_thread (struct thread_info *tp,
1833 enum btrace_thread_flag flag)
1834 {
1835 struct btrace_thread_info *btinfo;
1836
1837 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1838 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1839
1840 btinfo = &tp->btrace;
1841
1842 /* Fetch the latest branch trace. */
1843 btrace_fetch (tp);
1844
1845 /* A resume request overwrites a preceding resume or stop request. */
1846 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1847 btinfo->flags |= flag;
1848 }
1849
1850 /* Get the current frame for TP. */
1851
1852 static struct frame_info *
1853 get_thread_current_frame (struct thread_info *tp)
1854 {
1855 struct frame_info *frame;
1856 ptid_t old_inferior_ptid;
1857 int executing;
1858
1859 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1860 old_inferior_ptid = inferior_ptid;
1861 inferior_ptid = tp->ptid;
1862
1863 /* Clear the executing flag to allow changes to the current frame.
1864 We are not actually running, yet. We just started a reverse execution
1865 command or a record goto command.
1866 For the latter, EXECUTING is false and this has no effect.
1867 For the former, EXECUTING is true and we're in to_wait, about to
1868 move the thread. Since we need to recompute the stack, we temporarily
1869      set EXECUTING to false.  */
1870 executing = is_executing (inferior_ptid);
1871 set_executing (inferior_ptid, 0);
1872
1873 frame = NULL;
1874 TRY
1875 {
1876 frame = get_current_frame ();
1877 }
1878 CATCH (except, RETURN_MASK_ALL)
1879 {
1880 /* Restore the previous execution state. */
1881 set_executing (inferior_ptid, executing);
1882
1883 /* Restore the previous inferior_ptid. */
1884 inferior_ptid = old_inferior_ptid;
1885
1886 throw_exception (except);
1887 }
1888 END_CATCH
1889
1890 /* Restore the previous execution state. */
1891 set_executing (inferior_ptid, executing);
1892
1893 /* Restore the previous inferior_ptid. */
1894 inferior_ptid = old_inferior_ptid;
1895
1896 return frame;
1897 }
1898
1899 /* Start replaying a thread. */
1900
1901 static struct btrace_insn_iterator *
1902 record_btrace_start_replaying (struct thread_info *tp)
1903 {
1904 struct btrace_insn_iterator *replay;
1905 struct btrace_thread_info *btinfo;
1906
1907 btinfo = &tp->btrace;
1908 replay = NULL;
1909
1910 /* We can't start replaying without trace. */
1911 if (btinfo->functions.empty ())
1912 return NULL;
1913
1914   /* GDB stores the current frame_id when stepping in order to detect steps
1915 into subroutines.
1916 Since frames are computed differently when we're replaying, we need to
1917 recompute those stored frames and fix them up so we can still detect
1918 subroutines after we started replaying. */
1919 TRY
1920 {
1921 struct frame_info *frame;
1922 struct frame_id frame_id;
1923 int upd_step_frame_id, upd_step_stack_frame_id;
1924
1925 /* The current frame without replaying - computed via normal unwind. */
1926 frame = get_thread_current_frame (tp);
1927 frame_id = get_frame_id (frame);
1928
1929 /* Check if we need to update any stepping-related frame id's. */
1930 upd_step_frame_id = frame_id_eq (frame_id,
1931 tp->control.step_frame_id);
1932 upd_step_stack_frame_id = frame_id_eq (frame_id,
1933 tp->control.step_stack_frame_id);
1934
1935 /* We start replaying at the end of the branch trace. This corresponds
1936 to the current instruction. */
1937 replay = XNEW (struct btrace_insn_iterator);
1938 btrace_insn_end (replay, btinfo);
1939
1940 /* Skip gaps at the end of the trace. */
1941 while (btrace_insn_get (replay) == NULL)
1942 {
1943 unsigned int steps;
1944
1945 steps = btrace_insn_prev (replay, 1);
1946 if (steps == 0)
1947 error (_("No trace."));
1948 }
1949
1950 /* We're not replaying, yet. */
1951 gdb_assert (btinfo->replay == NULL);
1952 btinfo->replay = replay;
1953
1954 /* Make sure we're not using any stale registers. */
1955 registers_changed_ptid (tp->ptid);
1956
1957 /* The current frame with replaying - computed via btrace unwind. */
1958 frame = get_thread_current_frame (tp);
1959 frame_id = get_frame_id (frame);
1960
1961 /* Replace stepping related frames where necessary. */
1962 if (upd_step_frame_id)
1963 tp->control.step_frame_id = frame_id;
1964 if (upd_step_stack_frame_id)
1965 tp->control.step_stack_frame_id = frame_id;
1966 }
1967 CATCH (except, RETURN_MASK_ALL)
1968 {
1969 xfree (btinfo->replay);
1970 btinfo->replay = NULL;
1971
1972 registers_changed_ptid (tp->ptid);
1973
1974 throw_exception (except);
1975 }
1976 END_CATCH
1977
1978 return replay;
1979 }
1980
1981 /* Stop replaying a thread. */
1982
1983 static void
1984 record_btrace_stop_replaying (struct thread_info *tp)
1985 {
1986 struct btrace_thread_info *btinfo;
1987
1988 btinfo = &tp->btrace;
1989
1990 xfree (btinfo->replay);
1991 btinfo->replay = NULL;
1992
1993 /* Make sure we're not leaving any stale registers. */
1994 registers_changed_ptid (tp->ptid);
1995 }
1996
1997 /* Stop replaying TP if it is at the end of its execution history. */
1998
1999 static void
2000 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2001 {
2002 struct btrace_insn_iterator *replay, end;
2003 struct btrace_thread_info *btinfo;
2004
2005 btinfo = &tp->btrace;
2006 replay = btinfo->replay;
2007
2008 if (replay == NULL)
2009 return;
2010
2011 btrace_insn_end (&end, btinfo);
2012
2013 if (btrace_insn_cmp (replay, &end) == 0)
2014 record_btrace_stop_replaying (tp);
2015 }
2016
2017 /* The to_resume method of target record-btrace. */
2018
2019 static void
2020 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2021 enum gdb_signal signal)
2022 {
2023 struct thread_info *tp;
2024 enum btrace_thread_flag flag, cflag;
2025
2026 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2027 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2028 step ? "step" : "cont");
2029
2030 /* Store the execution direction of the last resume.
2031
2032 If there is more than one to_resume call, we have to rely on infrun
2033 to not change the execution direction in-between. */
2034 record_btrace_resume_exec_dir = execution_direction;
2035
2036 /* As long as we're not replaying, just forward the request.
2037
2038 For non-stop targets this means that no thread is replaying. In order to
2039 make progress, we may need to explicitly move replaying threads to the end
2040 of their execution history. */
2041 if ((execution_direction != EXEC_REVERSE)
2042 && !record_btrace_is_replaying (ops, minus_one_ptid))
2043 {
2044 ops = ops->beneath;
2045 ops->to_resume (ops, ptid, step, signal);
2046 return;
2047 }
2048
2049 /* Compute the btrace thread flag for the requested move. */
2050 if (execution_direction == EXEC_REVERSE)
2051 {
2052 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2053 cflag = BTHR_RCONT;
2054 }
2055 else
2056 {
2057 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2058 cflag = BTHR_CONT;
2059 }
2060
2061 /* We just indicate the resume intent here. The actual stepping happens in
2062 record_btrace_wait below.
2063
2064 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2065 if (!target_is_non_stop_p ())
2066 {
2067 gdb_assert (ptid_match (inferior_ptid, ptid));
2068
2069 ALL_NON_EXITED_THREADS (tp)
2070 if (ptid_match (tp->ptid, ptid))
2071 {
2072 if (ptid_match (tp->ptid, inferior_ptid))
2073 record_btrace_resume_thread (tp, flag);
2074 else
2075 record_btrace_resume_thread (tp, cflag);
2076 }
2077 }
2078 else
2079 {
2080 ALL_NON_EXITED_THREADS (tp)
2081 if (ptid_match (tp->ptid, ptid))
2082 record_btrace_resume_thread (tp, flag);
2083 }
2084
2085 /* Async support. */
2086 if (target_can_async_p ())
2087 {
2088 target_async (1);
2089 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2090 }
2091 }
2092
2093 /* The to_commit_resume method of target record-btrace. */
2094
2095 static void
2096 record_btrace_commit_resume (struct target_ops *ops)
2097 {
2098 if ((execution_direction != EXEC_REVERSE)
2099 && !record_btrace_is_replaying (ops, minus_one_ptid))
2100 ops->beneath->to_commit_resume (ops->beneath);
2101 }
2102
2103 /* Cancel resuming TP. */
2104
2105 static void
2106 record_btrace_cancel_resume (struct thread_info *tp)
2107 {
2108 enum btrace_thread_flag flags;
2109
2110 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2111 if (flags == 0)
2112 return;
2113
2114 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2115 print_thread_id (tp),
2116 target_pid_to_str (tp->ptid), flags,
2117 btrace_thread_flag_to_str (flags));
2118
2119 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2120 record_btrace_stop_replaying_at_end (tp);
2121 }
2122
2123 /* Return a target_waitstatus indicating that we ran out of history. */
2124
2125 static struct target_waitstatus
2126 btrace_step_no_history (void)
2127 {
2128 struct target_waitstatus status;
2129
2130 status.kind = TARGET_WAITKIND_NO_HISTORY;
2131
2132 return status;
2133 }
2134
2135 /* Return a target_waitstatus indicating that a step finished. */
2136
2137 static struct target_waitstatus
2138 btrace_step_stopped (void)
2139 {
2140 struct target_waitstatus status;
2141
2142 status.kind = TARGET_WAITKIND_STOPPED;
2143 status.value.sig = GDB_SIGNAL_TRAP;
2144
2145 return status;
2146 }
2147
2148 /* Return a target_waitstatus indicating that a thread was stopped as
2149 requested. */
2150
2151 static struct target_waitstatus
2152 btrace_step_stopped_on_request (void)
2153 {
2154 struct target_waitstatus status;
2155
2156 status.kind = TARGET_WAITKIND_STOPPED;
2157 status.value.sig = GDB_SIGNAL_0;
2158
2159 return status;
2160 }
2161
2162 /* Return a target_waitstatus indicating a spurious stop. */
2163
2164 static struct target_waitstatus
2165 btrace_step_spurious (void)
2166 {
2167 struct target_waitstatus status;
2168
2169 status.kind = TARGET_WAITKIND_SPURIOUS;
2170
2171 return status;
2172 }
2173
2174 /* Return a target_waitstatus indicating that the thread was not resumed. */
2175
2176 static struct target_waitstatus
2177 btrace_step_no_resumed (void)
2178 {
2179 struct target_waitstatus status;
2180
2181 status.kind = TARGET_WAITKIND_NO_RESUMED;
2182
2183 return status;
2184 }
2185
2186 /* Return a target_waitstatus indicating that we should wait again. */
2187
2188 static struct target_waitstatus
2189 btrace_step_again (void)
2190 {
2191 struct target_waitstatus status;
2192
2193 status.kind = TARGET_WAITKIND_IGNORE;
2194
2195 return status;
2196 }
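/* A quick reference for the waitstatus helpers above, as used by the
   stepping code below: NO_HISTORY means we ran out of execution history,
   STOPPED reports a user-visible stop, SPURIOUS means a single step
   completed without hitting anything of interest, NO_RESUMED means no
   thread was moving, and IGNORE asks the caller to step again. */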
2197
2198 /* Clear the record histories. */
2199
2200 static void
2201 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2202 {
2203 xfree (btinfo->insn_history);
2204 xfree (btinfo->call_history);
2205
2206 btinfo->insn_history = NULL;
2207 btinfo->call_history = NULL;
2208 }
2209
2210 /* Check whether TP's current replay position is at a breakpoint. */
2211
2212 static int
2213 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2214 {
2215 struct btrace_insn_iterator *replay;
2216 struct btrace_thread_info *btinfo;
2217 const struct btrace_insn *insn;
2218 struct inferior *inf;
2219
2220 btinfo = &tp->btrace;
2221 replay = btinfo->replay;
2222
2223 if (replay == NULL)
2224 return 0;
2225
2226 insn = btrace_insn_get (replay);
2227 if (insn == NULL)
2228 return 0;
2229
2230 inf = find_inferior_ptid (tp->ptid);
2231 if (inf == NULL)
2232 return 0;
2233
2234 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2235 &btinfo->stop_reason);
2236 }
2237
2238 /* Step one instruction in forward direction. */
2239
2240 static struct target_waitstatus
2241 record_btrace_single_step_forward (struct thread_info *tp)
2242 {
2243 struct btrace_insn_iterator *replay, end, start;
2244 struct btrace_thread_info *btinfo;
2245
2246 btinfo = &tp->btrace;
2247 replay = btinfo->replay;
2248
2249 /* We're done if we're not replaying. */
2250 if (replay == NULL)
2251 return btrace_step_no_history ();
2252
2253 /* Check if we're stepping a breakpoint. */
2254 if (record_btrace_replay_at_breakpoint (tp))
2255 return btrace_step_stopped ();
2256
2257 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2258 jump back to the instruction at which we started. */
2259 start = *replay;
2260 do
2261 {
2262 unsigned int steps;
2263
2264 /* We will bail out here if we continue stepping after reaching the end
2265 of the execution history. */
2266 steps = btrace_insn_next (replay, 1);
2267 if (steps == 0)
2268 {
2269 *replay = start;
2270 return btrace_step_no_history ();
2271 }
2272 }
2273 while (btrace_insn_get (replay) == NULL);
2274
2275 /* Determine the end of the instruction trace. */
2276 btrace_insn_end (&end, btinfo);
2277
2278 /* The execution trace contains (and ends with) the current instruction.
2279 This instruction has not been executed yet, so the trace really ends
2280 one instruction earlier. */
2281 if (btrace_insn_cmp (replay, &end) == 0)
2282 return btrace_step_no_history ();
2283
2284 return btrace_step_spurious ();
2285 }
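/* A note on gaps for the single-step functions above and below: a gap is a
   part of the trace that could not be reconstructed, and btrace_insn_get
   returns NULL for it. Replay never stops inside a gap since there is no
   instruction to show there, which is why both directions skip over gaps
   in a loop. */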
2286
2287 /* Step one instruction in backward direction. */
2288
2289 static struct target_waitstatus
2290 record_btrace_single_step_backward (struct thread_info *tp)
2291 {
2292 struct btrace_insn_iterator *replay, start;
2293 struct btrace_thread_info *btinfo;
2294
2295 btinfo = &tp->btrace;
2296 replay = btinfo->replay;
2297
2298 /* Start replaying if we're not already doing so. */
2299 if (replay == NULL)
2300 replay = record_btrace_start_replaying (tp);
2301
2302 /* If we can't step any further, we reached the end of the history.
2303 Skip gaps during replay. If we end up at a gap (at the beginning of
2304 the trace), jump back to the instruction at which we started. */
2305 start = *replay;
2306 do
2307 {
2308 unsigned int steps;
2309
2310 steps = btrace_insn_prev (replay, 1);
2311 if (steps == 0)
2312 {
2313 *replay = start;
2314 return btrace_step_no_history ();
2315 }
2316 }
2317 while (btrace_insn_get (replay) == NULL);
2318
2319 /* Check if we're stepping a breakpoint.
2320
2321 For reverse-stepping, this check is after the step. There is logic in
2322 infrun.c that handles reverse-stepping separately. See, for example,
2323 proceed and adjust_pc_after_break.
2324
2325 This code assumes that for reverse-stepping, PC points to the last
2326 de-executed instruction, whereas for forward-stepping PC points to the
2327 next to-be-executed instruction. */
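  /* For example, after reverse-stepping over instruction N, PC is at N
     itself, the just de-executed instruction; a forward step over N would
     leave PC at the next instruction. The breakpoint check therefore has
     to come after the backward step rather than before it. */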
2328 if (record_btrace_replay_at_breakpoint (tp))
2329 return btrace_step_stopped ();
2330
2331 return btrace_step_spurious ();
2332 }
2333
2334 /* Step a single thread. */
2335
2336 static struct target_waitstatus
2337 record_btrace_step_thread (struct thread_info *tp)
2338 {
2339 struct btrace_thread_info *btinfo;
2340 struct target_waitstatus status;
2341 enum btrace_thread_flag flags;
2342
2343 btinfo = &tp->btrace;
2344
2345 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2346 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2347
2348 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2349 target_pid_to_str (tp->ptid), flags,
2350 btrace_thread_flag_to_str (flags));
2351
2352 /* We can't step without an execution history. */
2353 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2354 return btrace_step_no_history ();
2355
2356 switch (flags)
2357 {
2358 default:
2359 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2360
2361 case BTHR_STOP:
2362 return btrace_step_stopped_on_request ();
2363
2364 case BTHR_STEP:
2365 status = record_btrace_single_step_forward (tp);
2366 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2367 break;
2368
2369 return btrace_step_stopped ();
2370
2371 case BTHR_RSTEP:
2372 status = record_btrace_single_step_backward (tp);
2373 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2374 break;
2375
2376 return btrace_step_stopped ();
2377
2378 case BTHR_CONT:
2379 status = record_btrace_single_step_forward (tp);
2380 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2381 break;
2382
2383 btinfo->flags |= flags;
2384 return btrace_step_again ();
2385
2386 case BTHR_RCONT:
2387 status = record_btrace_single_step_backward (tp);
2388 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2389 break;
2390
2391 btinfo->flags |= flags;
2392 return btrace_step_again ();
2393 }
2394
2395 /* We keep threads moving at the end of their execution history. The to_wait
2396 method will stop the thread for which the event is reported. */
2397 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2398 btinfo->flags |= flags;
2399
2400 return status;
2401 }
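/* Note the different continuation behavior above: BTHR_STEP and BTHR_RSTEP
   report a stop after a single instruction, while BTHR_CONT and BTHR_RCONT
   restore their flag and return btrace_step_again, so record_btrace_wait
   keeps stepping this thread until an event is found. */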
2402
2403 /* A vector of threads. */
2404
2405 typedef struct thread_info * tp_t;
2406 DEF_VEC_P (tp_t);
2407
2408 /* Announce further events if necessary. */
2409
2410 static void
2411 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2412 const VEC (tp_t) *no_history)
2413 {
2414 int more_moving, more_no_history;
2415
2416 more_moving = !VEC_empty (tp_t, moving);
2417 more_no_history = !VEC_empty (tp_t, no_history);
2418
2419 if (!more_moving && !more_no_history)
2420 return;
2421
2422 if (more_moving)
2423 DEBUG ("movers pending");
2424
2425 if (more_no_history)
2426 DEBUG ("no-history pending");
2427
2428 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2429 }
2430
2431 /* The to_wait method of target record-btrace. */
2432
2433 static ptid_t
2434 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2435 struct target_waitstatus *status, int options)
2436 {
2437 VEC (tp_t) *moving, *no_history;
2438 struct thread_info *tp, *eventing;
2439 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2440
2441 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2442
2443 /* As long as we're not replaying, just forward the request. */
2444 if ((execution_direction != EXEC_REVERSE)
2445 && !record_btrace_is_replaying (ops, minus_one_ptid))
2446 {
2447 ops = ops->beneath;
2448 return ops->to_wait (ops, ptid, status, options);
2449 }
2450
2451 moving = NULL;
2452 no_history = NULL;
2453
2454 make_cleanup (VEC_cleanup (tp_t), &moving);
2455 make_cleanup (VEC_cleanup (tp_t), &no_history);
2456
2457 /* Keep a work list of moving threads. */
2458 ALL_NON_EXITED_THREADS (tp)
2459 if (ptid_match (tp->ptid, ptid)
2460 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2461 VEC_safe_push (tp_t, moving, tp);
2462
2463 if (VEC_empty (tp_t, moving))
2464 {
2465 *status = btrace_step_no_resumed ();
2466
2467 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2468 target_waitstatus_to_string (status));
2469
2470 do_cleanups (cleanups);
2471 return null_ptid;
2472 }
2473
2474 /* Step moving threads one by one, one step each, until either one thread
2475 reports an event or we run out of threads to step.
2476
2477 When stepping more than one thread, chances are that some threads reach
2478 the end of their execution history earlier than others. If we reported
2479 this immediately, all-stop on top of non-stop would stop all threads and
2480 resume the same threads next time. And we would report the same thread
2481 having reached the end of its execution history again.
2482
2483 In the worst case, this would starve the other threads. But even if other
2484 threads would be allowed to make progress, this would result in far too
2485 many intermediate stops.
2486
2487 We therefore delay the reporting of "no execution history" until we have
2488 nothing else to report. By this time, all threads should have moved to
2489 either the beginning or the end of their execution history. There will
2490 be a single user-visible stop. */
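  /* For example, if threads A and B are both replaying and A has fewer
     instructions left, A reaches the end of its history first. Rather
     than reporting that immediately, we keep A nominally moving until B
     stops or also runs out of history, then report a single event. */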
2491 eventing = NULL;
2492 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2493 {
2494 unsigned int ix;
2495
2496 ix = 0;
2497 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2498 {
2499 *status = record_btrace_step_thread (tp);
2500
2501 switch (status->kind)
2502 {
2503 case TARGET_WAITKIND_IGNORE:
2504 ix++;
2505 break;
2506
2507 case TARGET_WAITKIND_NO_HISTORY:
2508 VEC_safe_push (tp_t, no_history,
2509 VEC_ordered_remove (tp_t, moving, ix));
2510 break;
2511
2512 default:
2513 eventing = VEC_unordered_remove (tp_t, moving, ix);
2514 break;
2515 }
2516 }
2517 }
2518
2519 if (eventing == NULL)
2520 {
2521 /* We started with at least one moving thread. This thread must have
2522 either stopped or reached the end of its execution history.
2523
2524 In the former case, EVENTING must not be NULL.
2525 In the latter case, NO_HISTORY must not be empty. */
2526 gdb_assert (!VEC_empty (tp_t, no_history));
2527
2528 /* We kept threads moving at the end of their execution history. Stop
2529 EVENTING now that we are going to report its stop. */
2530 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2531 eventing->btrace.flags &= ~BTHR_MOVE;
2532
2533 *status = btrace_step_no_history ();
2534 }
2535
2536 gdb_assert (eventing != NULL);
2537
2538 /* We kept threads replaying at the end of their execution history. Stop
2539 replaying EVENTING now that we are going to report its stop. */
2540 record_btrace_stop_replaying_at_end (eventing);
2541
2542 /* Stop all other threads. */
2543 if (!target_is_non_stop_p ())
2544 ALL_NON_EXITED_THREADS (tp)
2545 record_btrace_cancel_resume (tp);
2546
2547 /* In async mode, we need to announce further events. */
2548 if (target_is_async_p ())
2549 record_btrace_maybe_mark_async_event (moving, no_history);
2550
2551 /* Start record histories anew from the current position. */
2552 record_btrace_clear_histories (&eventing->btrace);
2553
2554 /* We moved the replay position but did not update registers. */
2555 registers_changed_ptid (eventing->ptid);
2556
2557 DEBUG ("wait ended by thread %s (%s): %s",
2558 print_thread_id (eventing),
2559 target_pid_to_str (eventing->ptid),
2560 target_waitstatus_to_string (status));
2561
2562 do_cleanups (cleanups);
2563 return eventing->ptid;
2564 }
2565
2566 /* The to_stop method of target record-btrace. */
2567
2568 static void
2569 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2570 {
2571 DEBUG ("stop %s", target_pid_to_str (ptid));
2572
2573 /* As long as we're not replaying, just forward the request. */
2574 if ((execution_direction != EXEC_REVERSE)
2575 && !record_btrace_is_replaying (ops, minus_one_ptid))
2576 {
2577 ops = ops->beneath;
2578 ops->to_stop (ops, ptid);
2579 }
2580 else
2581 {
2582 struct thread_info *tp;
2583
2584 ALL_NON_EXITED_THREADS (tp)
2585 if (ptid_match (tp->ptid, ptid))
2586 {
2587 tp->btrace.flags &= ~BTHR_MOVE;
2588 tp->btrace.flags |= BTHR_STOP;
2589 }
2590 }
2591 }
2592
2593 /* The to_can_execute_reverse method of target record-btrace. */
2594
2595 static int
2596 record_btrace_can_execute_reverse (struct target_ops *self)
2597 {
2598 return 1;
2599 }
2600
2601 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2602
2603 static int
2604 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2605 {
2606 if (record_btrace_is_replaying (ops, minus_one_ptid))
2607 {
2608 struct thread_info *tp = inferior_thread ();
2609
2610 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2611 }
2612
2613 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2614 }
2615
2616 /* The to_supports_stopped_by_sw_breakpoint method of target
2617 record-btrace. */
2618
2619 static int
2620 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2621 {
2622 if (record_btrace_is_replaying (ops, minus_one_ptid))
2623 return 1;
2624
2625 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2626 }
2627
2628 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2629
2630 static int
2631 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2632 {
2633 if (record_btrace_is_replaying (ops, minus_one_ptid))
2634 {
2635 struct thread_info *tp = inferior_thread ();
2636
2637 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2638 }
2639
2640 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2641 }
2642
2643 /* The to_supports_stopped_by_hw_breakpoint method of target
2644 record-btrace. */
2645
2646 static int
2647 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2648 {
2649 if (record_btrace_is_replaying (ops, minus_one_ptid))
2650 return 1;
2651
2652 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2653 }
2654
2655 /* The to_update_thread_list method of target record-btrace. */
2656
2657 static void
2658 record_btrace_update_thread_list (struct target_ops *ops)
2659 {
2660 /* We don't add or remove threads during replay. */
2661 if (record_btrace_is_replaying (ops, minus_one_ptid))
2662 return;
2663
2664 /* Forward the request. */
2665 ops = ops->beneath;
2666 ops->to_update_thread_list (ops);
2667 }
2668
2669 /* The to_thread_alive method of target record-btrace. */
2670
2671 static int
2672 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2673 {
2674 /* We don't add or remove threads during replay. */
2675 if (record_btrace_is_replaying (ops, minus_one_ptid))
2676 return find_thread_ptid (ptid) != NULL;
2677
2678 /* Forward the request. */
2679 ops = ops->beneath;
2680 return ops->to_thread_alive (ops, ptid);
2681 }
2682
2683 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2684 is stopped. */
2685
2686 static void
2687 record_btrace_set_replay (struct thread_info *tp,
2688 const struct btrace_insn_iterator *it)
2689 {
2690 struct btrace_thread_info *btinfo;
2691
2692 btinfo = &tp->btrace;
2693
2694 if (it == NULL)
2695 record_btrace_stop_replaying (tp);
2696 else
2697 {
2698 if (btinfo->replay == NULL)
2699 record_btrace_start_replaying (tp);
2700 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2701 return;
2702
2703 *btinfo->replay = *it;
2704 registers_changed_ptid (tp->ptid);
2705 }
2706
2707 /* Start anew from the new replay position. */
2708 record_btrace_clear_histories (btinfo);
2709
2710 stop_pc = regcache_read_pc (get_current_regcache ());
2711 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2712 }
2713
2714 /* The to_goto_record_begin method of target record-btrace. */
2715
2716 static void
2717 record_btrace_goto_begin (struct target_ops *self)
2718 {
2719 struct thread_info *tp;
2720 struct btrace_insn_iterator begin;
2721
2722 tp = require_btrace_thread ();
2723
2724 btrace_insn_begin (&begin, &tp->btrace);
2725
2726 /* Skip gaps at the beginning of the trace. */
2727 while (btrace_insn_get (&begin) == NULL)
2728 {
2729 unsigned int steps;
2730
2731 steps = btrace_insn_next (&begin, 1);
2732 if (steps == 0)
2733 error (_("No trace."));
2734 }
2735
2736 record_btrace_set_replay (tp, &begin);
2737 }
2738
2739 /* The to_goto_record_end method of target record-btrace. */
2740
2741 static void
2742 record_btrace_goto_end (struct target_ops *ops)
2743 {
2744 struct thread_info *tp;
2745
2746 tp = require_btrace_thread ();
2747
2748 record_btrace_set_replay (tp, NULL);
2749 }
2750
2751 /* The to_goto_record method of target record-btrace. */
2752
2753 static void
2754 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2755 {
2756 struct thread_info *tp;
2757 struct btrace_insn_iterator it;
2758 unsigned int number;
2759 int found;
2760
2761 number = insn;
2762
2763 /* Check for wrap-arounds: if INSN got truncated in the assignment above, it no longer equals NUMBER. */
2764 if (number != insn)
2765 error (_("Instruction number out of range."));
2766
2767 tp = require_btrace_thread ();
2768
2769 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2770
2771 /* Check if the instruction could not be found or is a gap. */
2772 if (found == 0 || btrace_insn_get (&it) == NULL)
2773 error (_("No such instruction."));
2774
2775 record_btrace_set_replay (tp, &it);
2776 }
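/* An illustrative session for the method above; the instruction number is
   hypothetical and would be taken from "record instruction-history":

     (gdb) record goto begin
     (gdb) record goto 42
     (gdb) record goto end  */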
2777
2778 /* The to_record_stop_replaying method of target record-btrace. */
2779
2780 static void
2781 record_btrace_stop_replaying_all (struct target_ops *self)
2782 {
2783 struct thread_info *tp;
2784
2785 ALL_NON_EXITED_THREADS (tp)
2786 record_btrace_stop_replaying (tp);
2787 }
2788
2789 /* The to_execution_direction target method. */
2790
2791 static enum exec_direction_kind
2792 record_btrace_execution_direction (struct target_ops *self)
2793 {
2794 return record_btrace_resume_exec_dir;
2795 }
2796
2797 /* The to_prepare_to_generate_core target method. */
2798
2799 static void
2800 record_btrace_prepare_to_generate_core (struct target_ops *self)
2801 {
2802 record_btrace_generating_corefile = 1;
2803 }
2804
2805 /* The to_done_generating_core target method. */
2806
2807 static void
2808 record_btrace_done_generating_core (struct target_ops *self)
2809 {
2810 record_btrace_generating_corefile = 0;
2811 }
2812
2813 /* Initialize the record-btrace target ops. */
2814
2815 static void
2816 init_record_btrace_ops (void)
2817 {
2818 struct target_ops *ops;
2819
2820 ops = &record_btrace_ops;
2821 ops->to_shortname = "record-btrace";
2822 ops->to_longname = "Branch tracing target";
2823 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2824 ops->to_open = record_btrace_open;
2825 ops->to_close = record_btrace_close;
2826 ops->to_async = record_btrace_async;
2827 ops->to_detach = record_detach;
2828 ops->to_disconnect = record_btrace_disconnect;
2829 ops->to_mourn_inferior = record_mourn_inferior;
2830 ops->to_kill = record_kill;
2831 ops->to_stop_recording = record_btrace_stop_recording;
2832 ops->to_info_record = record_btrace_info;
2833 ops->to_insn_history = record_btrace_insn_history;
2834 ops->to_insn_history_from = record_btrace_insn_history_from;
2835 ops->to_insn_history_range = record_btrace_insn_history_range;
2836 ops->to_call_history = record_btrace_call_history;
2837 ops->to_call_history_from = record_btrace_call_history_from;
2838 ops->to_call_history_range = record_btrace_call_history_range;
2839 ops->to_record_method = record_btrace_record_method;
2840 ops->to_record_is_replaying = record_btrace_is_replaying;
2841 ops->to_record_will_replay = record_btrace_will_replay;
2842 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2843 ops->to_xfer_partial = record_btrace_xfer_partial;
2844 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2845 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2846 ops->to_fetch_registers = record_btrace_fetch_registers;
2847 ops->to_store_registers = record_btrace_store_registers;
2848 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2849 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2850 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2851 ops->to_resume = record_btrace_resume;
2852 ops->to_commit_resume = record_btrace_commit_resume;
2853 ops->to_wait = record_btrace_wait;
2854 ops->to_stop = record_btrace_stop;
2855 ops->to_update_thread_list = record_btrace_update_thread_list;
2856 ops->to_thread_alive = record_btrace_thread_alive;
2857 ops->to_goto_record_begin = record_btrace_goto_begin;
2858 ops->to_goto_record_end = record_btrace_goto_end;
2859 ops->to_goto_record = record_btrace_goto;
2860 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2861 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2862 ops->to_supports_stopped_by_sw_breakpoint
2863 = record_btrace_supports_stopped_by_sw_breakpoint;
2864 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2865 ops->to_supports_stopped_by_hw_breakpoint
2866 = record_btrace_supports_stopped_by_hw_breakpoint;
2867 ops->to_execution_direction = record_btrace_execution_direction;
2868 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2869 ops->to_done_generating_core = record_btrace_done_generating_core;
2870 ops->to_stratum = record_stratum;
2871 ops->to_magic = OPS_MAGIC;
2872 }
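/* Methods not set here are filled in with delegating defaults when the
   target is registered (see add_target), so requests we do not handle
   explicitly end up at the target beneath. */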
2873
2874 /* Start recording in BTS format. */
2875
2876 static void
2877 cmd_record_btrace_bts_start (char *args, int from_tty)
2878 {
2879 if (args != NULL && *args != 0)
2880 error (_("Invalid argument."));
2881
2882 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2883
2884 TRY
2885 {
2886 execute_command ((char *) "target record-btrace", from_tty);
2887 }
2888 CATCH (exception, RETURN_MASK_ALL)
2889 {
2890 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2891 throw_exception (exception);
2892 }
2893 END_CATCH
2894 }
2895
2896 /* Start recording in Intel Processor Trace format. */
2897
2898 static void
2899 cmd_record_btrace_pt_start (char *args, int from_tty)
2900 {
2901 if (args != NULL && *args != 0)
2902 error (_("Invalid argument."));
2903
2904 record_btrace_conf.format = BTRACE_FORMAT_PT;
2905
2906 TRY
2907 {
2908 execute_command ((char *) "target record-btrace", from_tty);
2909 }
2910 CATCH (exception, RETURN_MASK_ALL)
2911 {
2912 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2913 throw_exception (exception);
2914 }
2915 END_CATCH
2916 }
2917
2918 /* The "record btrace" command: try the Intel Processor Trace format first, fall back to BTS. */
2919
2920 static void
2921 cmd_record_btrace_start (char *args, int from_tty)
2922 {
2923 if (args != NULL && *args != 0)
2924 error (_("Invalid argument."));
2925
2926 record_btrace_conf.format = BTRACE_FORMAT_PT;
2927
2928 TRY
2929 {
2930 execute_command ((char *) "target record-btrace", from_tty);
2931 }
2932 CATCH (exception, RETURN_MASK_ALL)
2933 {
2934 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2935
2936 TRY
2937 {
2938 execute_command ((char *) "target record-btrace", from_tty);
2939 }
2940 CATCH (exception, RETURN_MASK_ALL)
2941 {
2942 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2943 throw_exception (exception);
2944 }
2945 END_CATCH
2946 }
2947 END_CATCH
2948 }
2949
2950 /* The "set record btrace" command. */
2951
2952 static void
2953 cmd_set_record_btrace (char *args, int from_tty)
2954 {
2955 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2956 }
2957
2958 /* The "show record btrace" command. */
2959
2960 static void
2961 cmd_show_record_btrace (char *args, int from_tty)
2962 {
2963 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2964 }
2965
2966 /* The "show record btrace replay-memory-access" command. */
2967
2968 static void
2969 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2970 struct cmd_list_element *c, const char *value)
2971 {
2972 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2973 replay_memory_access);
2974 }
2975
2976 /* The "set record btrace bts" command. */
2977
2978 static void
2979 cmd_set_record_btrace_bts (char *args, int from_tty)
2980 {
2981 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2982 "by an appropriate subcommand.\n"));
2983 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2984 all_commands, gdb_stdout);
2985 }
2986
2987 /* The "show record btrace bts" command. */
2988
2989 static void
2990 cmd_show_record_btrace_bts (char *args, int from_tty)
2991 {
2992 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2993 }
2994
2995 /* The "set record btrace pt" command. */
2996
2997 static void
2998 cmd_set_record_btrace_pt (char *args, int from_tty)
2999 {
3000 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3001 "by an appropriate subcommand.\n"));
3002 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3003 all_commands, gdb_stdout);
3004 }
3005
3006 /* The "show record btrace pt" command. */
3007
3008 static void
3009 cmd_show_record_btrace_pt (char *args, int from_tty)
3010 {
3011 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3012 }
3013
3014 /* The "record bts buffer-size" show value function. */
3015
3016 static void
3017 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3018 struct cmd_list_element *c,
3019 const char *value)
3020 {
3021 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3022 value);
3023 }
3024
3025 /* The "record pt buffer-size" show value function. */
3026
3027 static void
3028 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3029 struct cmd_list_element *c,
3030 const char *value)
3031 {
3032 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3033 value);
3034 }
3035
3036 void _initialize_record_btrace (void);
3037
3038 /* Initialize btrace commands. */
3039
3040 void
3041 _initialize_record_btrace (void)
3042 {
3043 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3044 _("Start branch trace recording."), &record_btrace_cmdlist,
3045 "record btrace ", 0, &record_cmdlist);
3046 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3047
3048 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3049 _("\
3050 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3051 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3052 This format may not be available on all processors."),
3053 &record_btrace_cmdlist);
3054 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3055
3056 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3057 _("\
3058 Start branch trace recording in Intel Processor Trace format.\n\n\
3059 This format may not be available on all processors."),
3060 &record_btrace_cmdlist);
3061 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3062
3063 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3064 _("Set record options"), &set_record_btrace_cmdlist,
3065 "set record btrace ", 0, &set_record_cmdlist);
3066
3067 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3068 _("Show record options"), &show_record_btrace_cmdlist,
3069 "show record btrace ", 0, &show_record_cmdlist);
3070
3071 add_setshow_enum_cmd ("replay-memory-access", no_class,
3072 replay_memory_access_types, &replay_memory_access, _("\
3073 Set what memory accesses are allowed during replay."), _("\
3074 Show what memory accesses are allowed during replay."),
3075 _("Default is READ-ONLY.\n\n\
3076 The btrace record target does not trace data.\n\
3077 The memory therefore corresponds to the live target and not \
3078 to the current replay position.\n\n\
3079 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3080 When READ-WRITE, allow accesses to read-only and read-write memory during \
3081 replay."),
3082 NULL, cmd_show_replay_memory_access,
3083 &set_record_btrace_cmdlist,
3084 &show_record_btrace_cmdlist);
3085
3086 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3087 _("Set record btrace bts options"),
3088 &set_record_btrace_bts_cmdlist,
3089 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3090
3091 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3092 _("Show record btrace bts options"),
3093 &show_record_btrace_bts_cmdlist,
3094 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3095
3096 add_setshow_uinteger_cmd ("buffer-size", no_class,
3097 &record_btrace_conf.bts.size,
3098 _("Set the record/replay bts buffer size."),
3099 _("Show the record/replay bts buffer size."), _("\
3100 When starting recording, request a trace buffer of this size. \
3101 The actual buffer size may differ from the requested size. \
3102 Use \"info record\" to see the actual buffer size.\n\n\
3103 Bigger buffers allow longer recording but also take more time to process \
3104 the recorded execution trace.\n\n\
3105 The trace buffer size may not be changed while recording."), NULL,
3106 show_record_bts_buffer_size_value,
3107 &set_record_btrace_bts_cmdlist,
3108 &show_record_btrace_bts_cmdlist);
3109
3110 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3111 _("Set record btrace pt options"),
3112 &set_record_btrace_pt_cmdlist,
3113 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3114
3115 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3116 _("Show record btrace pt options"),
3117 &show_record_btrace_pt_cmdlist,
3118 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3119
3120 add_setshow_uinteger_cmd ("buffer-size", no_class,
3121 &record_btrace_conf.pt.size,
3122 _("Set the record/replay pt buffer size."),
3123 _("Show the record/replay pt buffer size."), _("\
3124 Bigger buffers allow longer recording but also take more time to process \
3125 the recorded execution trace.\n\
3126 The actual buffer size may differ from the requested size. Use \"info record\" \
3127 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3128 &set_record_btrace_pt_cmdlist,
3129 &show_record_btrace_pt_cmdlist);
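/* An illustrative use of the buffer-size commands registered above; sizes
   are in bytes and should be set before recording starts:

     (gdb) set record btrace pt buffer-size 65536
     (gdb) record btrace pt
     (gdb) info record  */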
3130
3131 init_record_btrace_ops ();
3132 add_target (&record_btrace_ops);
3133
3134 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3135 xcalloc, xfree);
3136
3137 record_btrace_conf.bts.size = 64 * 1024;
3138 record_btrace_conf.pt.size = 16 * 1024;
3139 }