1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "record-btrace.h"
25 #include "gdbthread.h"
26 #include "target.h"
27 #include "gdbcmd.h"
28 #include "disasm.h"
29 #include "observer.h"
30 #include "cli/cli-utils.h"
31 #include "source.h"
32 #include "ui-out.h"
33 #include "symtab.h"
34 #include "filenames.h"
35 #include "regcache.h"
36 #include "frame-unwind.h"
37 #include "hashtab.h"
38 #include "infrun.h"
39 #include "event-loop.h"
40 #include "inf-loop.h"
41 #include "vec.h"
42
43 /* The target_ops of record-btrace. */
44 static struct target_ops record_btrace_ops;
45
46 /* A new thread observer enabling branch tracing for the new thread. */
47 static struct observer *record_btrace_thread_observer;
48
49 /* Memory access types used in set/show record btrace replay-memory-access. */
50 static const char replay_memory_access_read_only[] = "read-only";
51 static const char replay_memory_access_read_write[] = "read-write";
52 static const char *const replay_memory_access_types[] =
53 {
54 replay_memory_access_read_only,
55 replay_memory_access_read_write,
56 NULL
57 };
58
59 /* The currently allowed replay memory access type. */
60 static const char *replay_memory_access = replay_memory_access_read_only;
61
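/* Illustrative note (editor's addition, not part of the original file): the
   variable above is controlled by the "set/show record btrace
   replay-memory-access" commands mentioned in the comment.  A minimal usage
   sketch from a GDB session:

     (gdb) set record btrace replay-memory-access read-write
     (gdb) show record btrace replay-memory-access

   With the default "read-only", memory writes and reads of non-readonly
   memory are refused while replaying; see record_btrace_xfer_partial further
   down.  */
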
62 /* Command lists for "set/show record btrace". */
63 static struct cmd_list_element *set_record_btrace_cmdlist;
64 static struct cmd_list_element *show_record_btrace_cmdlist;
65
66 /* The execution direction of the last resume we got. See record-full.c. */
67 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
68
69 /* The async event handler for reverse/replay execution. */
70 static struct async_event_handler *record_btrace_async_inferior_event_handler;
71
72 /* A flag indicating that we are currently generating a core file. */
73 static int record_btrace_generating_corefile;
74
75 /* The current branch trace configuration. */
76 static struct btrace_config record_btrace_conf;
77
78 /* Command list for "record btrace". */
79 static struct cmd_list_element *record_btrace_cmdlist;
80
81 /* Command lists for "set/show record btrace bts". */
82 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
83 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
84
85 /* Command lists for "set/show record btrace pt". */
86 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
87 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
88
89 /* Print a record-btrace debug message. Use do ... while (0) to avoid
90 ambiguities when used in if statements. */
91
92 #define DEBUG(msg, args...) \
93 do \
94 { \
95 if (record_debug != 0) \
96 fprintf_unfiltered (gdb_stdlog, \
97 "[record-btrace] " msg "\n", ##args); \
98 } \
99 while (0)
100
101
102 /* Update the branch trace for the current thread and return a pointer to its
103 thread_info.
104
105 Throws an error if there is no thread or no trace. This function never
106 returns NULL. */
107
108 static struct thread_info *
109 require_btrace_thread (void)
110 {
111 struct thread_info *tp;
112
113 DEBUG ("require");
114
115 tp = find_thread_ptid (inferior_ptid);
116 if (tp == NULL)
117 error (_("No thread."));
118
119 btrace_fetch (tp);
120
121 if (btrace_is_empty (tp))
122 error (_("No trace."));
123
124 return tp;
125 }
126
127 /* Update the branch trace for the current thread and return a pointer to its
128 branch trace information struct.
129
130 Throws an error if there is no thread or no trace. This function never
131 returns NULL. */
132
133 static struct btrace_thread_info *
134 require_btrace (void)
135 {
136 struct thread_info *tp;
137
138 tp = require_btrace_thread ();
139
140 return &tp->btrace;
141 }
142
143 /* Enable branch tracing for one thread. Warn on errors. */
144
145 static void
146 record_btrace_enable_warn (struct thread_info *tp)
147 {
148 TRY
149 {
150 btrace_enable (tp, &record_btrace_conf);
151 }
152 CATCH (error, RETURN_MASK_ERROR)
153 {
154 warning ("%s", error.message);
155 }
156 END_CATCH
157 }
158
159 /* Callback function to disable branch tracing for one thread. */
160
161 static void
162 record_btrace_disable_callback (void *arg)
163 {
164 struct thread_info *tp = (struct thread_info *) arg;
165
166 btrace_disable (tp);
167 }
168
169 /* Enable automatic tracing of new threads. */
170
171 static void
172 record_btrace_auto_enable (void)
173 {
174 DEBUG ("attach thread observer");
175
176 record_btrace_thread_observer
177 = observer_attach_new_thread (record_btrace_enable_warn);
178 }
179
180 /* Disable automatic tracing of new threads. */
181
182 static void
183 record_btrace_auto_disable (void)
184 {
185 /* The observer may have been detached already. */
186 if (record_btrace_thread_observer == NULL)
187 return;
188
189 DEBUG ("detach thread observer");
190
191 observer_detach_new_thread (record_btrace_thread_observer);
192 record_btrace_thread_observer = NULL;
193 }
194
195 /* The record-btrace async event handler function. */
196
197 static void
198 record_btrace_handle_async_inferior_event (gdb_client_data data)
199 {
200 inferior_event_handler (INF_REG_EVENT, NULL);
201 }
202
203 /* See record-btrace.h. */
204
205 void
206 record_btrace_push_target (void)
207 {
208 const char *format;
209
210 record_btrace_auto_enable ();
211
212 push_target (&record_btrace_ops);
213
214 record_btrace_async_inferior_event_handler
215 = create_async_event_handler (record_btrace_handle_async_inferior_event,
216 NULL);
217 record_btrace_generating_corefile = 0;
218
219 format = btrace_format_short_string (record_btrace_conf.format);
220 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
221 }
222
223 /* The to_open method of target record-btrace. */
224
225 static void
226 record_btrace_open (const char *args, int from_tty)
227 {
228 struct cleanup *disable_chain;
229 struct thread_info *tp;
230
231 DEBUG ("open");
232
233 record_preopen ();
234
235 if (!target_has_execution)
236 error (_("The program is not being run."));
237
238 gdb_assert (record_btrace_thread_observer == NULL);
239
240 disable_chain = make_cleanup (null_cleanup, NULL);
241 ALL_NON_EXITED_THREADS (tp)
242 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
243 {
244 btrace_enable (tp, &record_btrace_conf);
245
246 make_cleanup (record_btrace_disable_callback, tp);
247 }
248
249 record_btrace_push_target ();
250
251 discard_cleanups (disable_chain);
252 }
253
254 /* The to_stop_recording method of target record-btrace. */
255
256 static void
257 record_btrace_stop_recording (struct target_ops *self)
258 {
259 struct thread_info *tp;
260
261 DEBUG ("stop recording");
262
263 record_btrace_auto_disable ();
264
265 ALL_NON_EXITED_THREADS (tp)
266 if (tp->btrace.target != NULL)
267 btrace_disable (tp);
268 }
269
270 /* The to_disconnect method of target record-btrace. */
271
272 static void
273 record_btrace_disconnect (struct target_ops *self, const char *args,
274 int from_tty)
275 {
276 struct target_ops *beneath = self->beneath;
277
278 /* Do not stop recording, just clean up GDB side. */
279 unpush_target (self);
280
281 /* Forward disconnect. */
282 beneath->to_disconnect (beneath, args, from_tty);
283 }
284
285 /* The to_close method of target record-btrace. */
286
287 static void
288 record_btrace_close (struct target_ops *self)
289 {
290 struct thread_info *tp;
291
292 if (record_btrace_async_inferior_event_handler != NULL)
293 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
294
295 /* Make sure automatic recording gets disabled even if we did not stop
296 recording before closing the record-btrace target. */
297 record_btrace_auto_disable ();
298
299 /* We should have already stopped recording.
300 Tear down btrace in case we have not. */
301 ALL_NON_EXITED_THREADS (tp)
302 btrace_teardown (tp);
303 }
304
305 /* The to_async method of target record-btrace. */
306
307 static void
308 record_btrace_async (struct target_ops *ops, int enable)
309 {
310 if (enable)
311 mark_async_event_handler (record_btrace_async_inferior_event_handler);
312 else
313 clear_async_event_handler (record_btrace_async_inferior_event_handler);
314
315 ops->beneath->to_async (ops->beneath, enable);
316 }
317
319 /* Adjust the size and return a human-readable size suffix. */
319
320 static const char *
321 record_btrace_adjust_size (unsigned int *size)
322 {
323 unsigned int sz;
324
325 sz = *size;
326
327 if ((sz & ((1u << 30) - 1)) == 0)
328 {
329 *size = sz >> 30;
330 return "GB";
331 }
332 else if ((sz & ((1u << 20) - 1)) == 0)
333 {
334 *size = sz >> 20;
335 return "MB";
336 }
337 else if ((sz & ((1u << 10) - 1)) == 0)
338 {
339 *size = sz >> 10;
340 return "kB";
341 }
342 else
343 return "";
344 }
345
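/* Worked example (editor's addition, not part of the original file):
   record_btrace_adjust_size only scales the value when it is an exact
   multiple of the unit, which the bit masks above test for:

     sz = 16777216 (16 * 1024 * 1024)  ->  sz = 16,   suffix "MB"
     sz = 3072     (3 * 1024)          ->  sz = 3,    suffix "kB"
     sz = 1000                         ->  sz = 1000, suffix ""

   A size like 1000 is left untouched because it is not a multiple of
   1024.  */
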
346 /* Print a BTS configuration. */
347
348 static void
349 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
350 {
351 const char *suffix;
352 unsigned int size;
353
354 size = conf->size;
355 if (size > 0)
356 {
357 suffix = record_btrace_adjust_size (&size);
358 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
359 }
360 }
361
362 /* Print an Intel Processor Trace configuration. */
363
364 static void
365 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
366 {
367 const char *suffix;
368 unsigned int size;
369
370 size = conf->size;
371 if (size > 0)
372 {
373 suffix = record_btrace_adjust_size (&size);
374 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
375 }
376 }
377
378 /* Print a branch tracing configuration. */
379
380 static void
381 record_btrace_print_conf (const struct btrace_config *conf)
382 {
383 printf_unfiltered (_("Recording format: %s.\n"),
384 btrace_format_string (conf->format));
385
386 switch (conf->format)
387 {
388 case BTRACE_FORMAT_NONE:
389 return;
390
391 case BTRACE_FORMAT_BTS:
392 record_btrace_print_bts_conf (&conf->bts);
393 return;
394
395 case BTRACE_FORMAT_PT:
396 record_btrace_print_pt_conf (&conf->pt);
397 return;
398 }
399
400 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
401 }
402
403 /* The to_info_record method of target record-btrace. */
404
405 static void
406 record_btrace_info (struct target_ops *self)
407 {
408 struct btrace_thread_info *btinfo;
409 const struct btrace_config *conf;
410 struct thread_info *tp;
411 unsigned int insns, calls, gaps;
412
413 DEBUG ("info");
414
415 tp = find_thread_ptid (inferior_ptid);
416 if (tp == NULL)
417 error (_("No thread."));
418
419 btinfo = &tp->btrace;
420
421 conf = btrace_conf (btinfo);
422 if (conf != NULL)
423 record_btrace_print_conf (conf);
424
425 btrace_fetch (tp);
426
427 insns = 0;
428 calls = 0;
429 gaps = 0;
430
431 if (!btrace_is_empty (tp))
432 {
433 struct btrace_call_iterator call;
434 struct btrace_insn_iterator insn;
435
436 btrace_call_end (&call, btinfo);
437 btrace_call_prev (&call, 1);
438 calls = btrace_call_number (&call);
439
440 btrace_insn_end (&insn, btinfo);
441
442 insns = btrace_insn_number (&insn);
443 if (insns != 0)
444 {
445 /* The last instruction does not really belong to the trace. */
446 insns -= 1;
447 }
448 else
449 {
450 unsigned int steps;
451
452 /* Skip gaps at the end. */
453 do
454 {
455 steps = btrace_insn_prev (&insn, 1);
456 if (steps == 0)
457 break;
458
459 insns = btrace_insn_number (&insn);
460 }
461 while (insns == 0);
462 }
463
464 gaps = btinfo->ngaps;
465 }
466
467 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
468 "for thread %s (%s).\n"), insns, calls, gaps,
469 print_thread_id (tp), target_pid_to_str (tp->ptid));
470
471 if (btrace_is_replaying (tp))
472 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
473 btrace_insn_number (btinfo->replay));
474 }
475
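/* Illustrative output (editor's addition, not part of the original file):
   the printf calls above produce "info record" output roughly like the
   following; the numbers are made up and the first line depends on
   btrace_format_string:

     Recording format: <format>.
     Buffer size: 64kB.
     Recorded 4238 instructions in 132 functions (0 gaps) for thread 1 (process 1234).

   While replaying, "Replay in progress. At instruction 40." is printed in
   addition.  */
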
476 /* Print a decode error. */
477
478 static void
479 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
480 enum btrace_format format)
481 {
482 const char *errstr;
483 int is_error;
484
485 errstr = _("unknown");
486 is_error = 1;
487
488 switch (format)
489 {
490 default:
491 break;
492
493 case BTRACE_FORMAT_BTS:
494 switch (errcode)
495 {
496 default:
497 break;
498
499 case BDE_BTS_OVERFLOW:
500 errstr = _("instruction overflow");
501 break;
502
503 case BDE_BTS_INSN_SIZE:
504 errstr = _("unknown instruction");
505 break;
506 }
507 break;
508
509 #if defined (HAVE_LIBIPT)
510 case BTRACE_FORMAT_PT:
511 switch (errcode)
512 {
513 case BDE_PT_USER_QUIT:
514 is_error = 0;
515 errstr = _("trace decode cancelled");
516 break;
517
518 case BDE_PT_DISABLED:
519 is_error = 0;
520 errstr = _("disabled");
521 break;
522
523 case BDE_PT_OVERFLOW:
524 is_error = 0;
525 errstr = _("overflow");
526 break;
527
528 default:
529 if (errcode < 0)
530 errstr = pt_errstr (pt_errcode (errcode));
531 break;
532 }
533 break;
534 #endif /* defined (HAVE_LIBIPT) */
535 }
536
537 ui_out_text (uiout, _("["));
538 if (is_error)
539 {
540 ui_out_text (uiout, _("decode error ("));
541 ui_out_field_int (uiout, "errcode", errcode);
542 ui_out_text (uiout, _("): "));
543 }
544 ui_out_text (uiout, errstr);
545 ui_out_text (uiout, _("]\n"));
546 }
547
548 /* Print an unsigned int. */
549
550 static void
551 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
552 {
553 ui_out_field_fmt (uiout, fld, "%u", val);
554 }
555
556 /* A range of source lines. */
557
558 struct btrace_line_range
559 {
560 /* The symtab this line is from. */
561 struct symtab *symtab;
562
563 /* The first line (inclusive). */
564 int begin;
565
566 /* The last line (exclusive). */
567 int end;
568 };
569
570 /* Construct a line range. */
571
572 static struct btrace_line_range
573 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
574 {
575 struct btrace_line_range range;
576
577 range.symtab = symtab;
578 range.begin = begin;
579 range.end = end;
580
581 return range;
582 }
583
584 /* Add a line to a line range. */
585
586 static struct btrace_line_range
587 btrace_line_range_add (struct btrace_line_range range, int line)
588 {
589 if (range.end <= range.begin)
590 {
591 /* This is the first entry. */
592 range.begin = line;
593 range.end = line + 1;
594 }
595 else if (line < range.begin)
596 range.begin = line;
597 else if (range.end < line)
598 range.end = line;
599
600 return range;
601 }
602
603 /* Return non-zero if RANGE is empty, zero otherwise. */
604
605 static int
606 btrace_line_range_is_empty (struct btrace_line_range range)
607 {
608 return range.end <= range.begin;
609 }
610
611 /* Return non-zero if LHS contains RHS, zero otherwise. */
612
613 static int
614 btrace_line_range_contains_range (struct btrace_line_range lhs,
615 struct btrace_line_range rhs)
616 {
617 return ((lhs.symtab == rhs.symtab)
618 && (lhs.begin <= rhs.begin)
619 && (rhs.end <= lhs.end));
620 }
621
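/* Illustrative sketch (editor's addition, not part of the original file):
   a range starts out empty and grows to cover the lines added to it.  For
   example, given some SYMTAB:

     struct btrace_line_range r = btrace_mk_line_range (symtab, 0, 0);
     r = btrace_line_range_add (r, 42);    now begin == 42, end == 43
     r = btrace_line_range_add (r, 40);    now begin == 40, end == 43

   btrace_line_range_is_empty (r) returns zero once a line has been
   added.  */
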
622 /* Find the line range associated with PC. */
623
624 static struct btrace_line_range
625 btrace_find_line_range (CORE_ADDR pc)
626 {
627 struct btrace_line_range range;
628 struct linetable_entry *lines;
629 struct linetable *ltable;
630 struct symtab *symtab;
631 int nlines, i;
632
633 symtab = find_pc_line_symtab (pc);
634 if (symtab == NULL)
635 return btrace_mk_line_range (NULL, 0, 0);
636
637 ltable = SYMTAB_LINETABLE (symtab);
638 if (ltable == NULL)
639 return btrace_mk_line_range (symtab, 0, 0);
640
641 nlines = ltable->nitems;
642 lines = ltable->item;
643 if (nlines <= 0)
644 return btrace_mk_line_range (symtab, 0, 0);
645
646 range = btrace_mk_line_range (symtab, 0, 0);
647 for (i = 0; i < nlines; i++)
648 {
649 if ((lines[i].pc == pc) && (lines[i].line != 0))
650 range = btrace_line_range_add (range, lines[i].line);
651 }
652
653 return range;
654 }
655
656 /* Print source lines in LINES to UIOUT.
657
658 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
659 instructions corresponding to that source line. When printing a new source
660 line, we do the cleanups for the open chain and open a new cleanup chain for
661 the new source line. If the source line range in LINES is not empty, this
662 function will leave the cleanup chain for the last printed source line open
663 so instructions can be added to it. */
664
665 static void
666 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
667 struct cleanup **ui_item_chain, int flags)
668 {
669 print_source_lines_flags psl_flags;
670 int line;
671
672 psl_flags = 0;
673 if (flags & DISASSEMBLY_FILENAME)
674 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
675
676 for (line = lines.begin; line < lines.end; ++line)
677 {
678 if (*ui_item_chain != NULL)
679 do_cleanups (*ui_item_chain);
680
681 *ui_item_chain
682 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
683
684 print_source_lines (lines.symtab, line, line + 1, psl_flags);
685
686 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
687 }
688 }
689
690 /* Disassemble a section of the recorded instruction trace. */
691
692 static void
693 btrace_insn_history (struct ui_out *uiout,
694 const struct btrace_thread_info *btinfo,
695 const struct btrace_insn_iterator *begin,
696 const struct btrace_insn_iterator *end, int flags)
697 {
698 struct ui_file *stb;
699 struct cleanup *cleanups, *ui_item_chain;
700 struct disassemble_info di;
701 struct gdbarch *gdbarch;
702 struct btrace_insn_iterator it;
703 struct btrace_line_range last_lines;
704
705 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
706 btrace_insn_number (end));
707
708 flags |= DISASSEMBLY_SPECULATIVE;
709
710 gdbarch = target_gdbarch ();
711 stb = mem_fileopen ();
712 cleanups = make_cleanup_ui_file_delete (stb);
713 di = gdb_disassemble_info (gdbarch, stb);
714 last_lines = btrace_mk_line_range (NULL, 0, 0);
715
716 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
717
718 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
719 instructions corresponding to that line. */
720 ui_item_chain = NULL;
721
722 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
723 {
724 const struct btrace_insn *insn;
725
726 insn = btrace_insn_get (&it);
727
728 /* A NULL instruction indicates a gap in the trace. */
729 if (insn == NULL)
730 {
731 const struct btrace_config *conf;
732
733 conf = btrace_conf (btinfo);
734
735 /* We have trace so we must have a configuration. */
736 gdb_assert (conf != NULL);
737
738 btrace_ui_out_decode_error (uiout, it.function->errcode,
739 conf->format);
740 }
741 else
742 {
743 struct disasm_insn dinsn;
744
745 if ((flags & DISASSEMBLY_SOURCE) != 0)
746 {
747 struct btrace_line_range lines;
748
749 lines = btrace_find_line_range (insn->pc);
750 if (!btrace_line_range_is_empty (lines)
751 && !btrace_line_range_contains_range (last_lines, lines))
752 {
753 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
754 last_lines = lines;
755 }
756 else if (ui_item_chain == NULL)
757 {
758 ui_item_chain
759 = make_cleanup_ui_out_tuple_begin_end (uiout,
760 "src_and_asm_line");
761 /* No source information. */
762 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
763 }
764
765 gdb_assert (ui_item_chain != NULL);
766 }
767
768 memset (&dinsn, 0, sizeof (dinsn));
769 dinsn.number = btrace_insn_number (&it);
770 dinsn.addr = insn->pc;
771
772 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
773 dinsn.is_speculative = 1;
774
775 gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
776 }
777 }
778
779 do_cleanups (cleanups);
780 }
781
782 /* The to_insn_history method of target record-btrace. */
783
784 static void
785 record_btrace_insn_history (struct target_ops *self, int size, int flags)
786 {
787 struct btrace_thread_info *btinfo;
788 struct btrace_insn_history *history;
789 struct btrace_insn_iterator begin, end;
790 struct cleanup *uiout_cleanup;
791 struct ui_out *uiout;
792 unsigned int context, covered;
793
794 uiout = current_uiout;
795 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
796 "insn history");
797 context = abs (size);
798 if (context == 0)
799 error (_("Bad record instruction-history-size."));
800
801 btinfo = require_btrace ();
802 history = btinfo->insn_history;
803 if (history == NULL)
804 {
805 struct btrace_insn_iterator *replay;
806
807 DEBUG ("insn-history (0x%x): %d", flags, size);
808
809 /* If we're replaying, we start at the replay position. Otherwise, we
810 start at the tail of the trace. */
811 replay = btinfo->replay;
812 if (replay != NULL)
813 begin = *replay;
814 else
815 btrace_insn_end (&begin, btinfo);
816
817 /* We start from here and expand in the requested direction. Then we
818 expand in the other direction, as well, to fill up any remaining
819 context. */
820 end = begin;
821 if (size < 0)
822 {
823 /* We want the current position covered, as well. */
824 covered = btrace_insn_next (&end, 1);
825 covered += btrace_insn_prev (&begin, context - covered);
826 covered += btrace_insn_next (&end, context - covered);
827 }
828 else
829 {
830 covered = btrace_insn_next (&end, context);
831 covered += btrace_insn_prev (&begin, context - covered);
832 }
833 }
834 else
835 {
836 begin = history->begin;
837 end = history->end;
838
839 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
840 btrace_insn_number (&begin), btrace_insn_number (&end));
841
842 if (size < 0)
843 {
844 end = begin;
845 covered = btrace_insn_prev (&begin, context);
846 }
847 else
848 {
849 begin = end;
850 covered = btrace_insn_next (&end, context);
851 }
852 }
853
854 if (covered > 0)
855 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
856 else
857 {
858 if (size < 0)
859 printf_unfiltered (_("At the start of the branch trace record.\n"));
860 else
861 printf_unfiltered (_("At the end of the branch trace record.\n"));
862 }
863
864 btrace_set_insn_history (btinfo, &begin, &end);
865 do_cleanups (uiout_cleanup);
866 }
867
868 /* The to_insn_history_range method of target record-btrace. */
869
870 static void
871 record_btrace_insn_history_range (struct target_ops *self,
872 ULONGEST from, ULONGEST to, int flags)
873 {
874 struct btrace_thread_info *btinfo;
875 struct btrace_insn_history *history;
876 struct btrace_insn_iterator begin, end;
877 struct cleanup *uiout_cleanup;
878 struct ui_out *uiout;
879 unsigned int low, high;
880 int found;
881
882 uiout = current_uiout;
883 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
884 "insn history");
885 low = from;
886 high = to;
887
888 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
889
890 /* Check for wrap-arounds. */
891 if (low != from || high != to)
892 error (_("Bad range."));
893
894 if (high < low)
895 error (_("Bad range."));
896
897 btinfo = require_btrace ();
898
899 found = btrace_find_insn_by_number (&begin, btinfo, low);
900 if (found == 0)
901 error (_("Range out of bounds."));
902
903 found = btrace_find_insn_by_number (&end, btinfo, high);
904 if (found == 0)
905 {
906 /* Silently truncate the range. */
907 btrace_insn_end (&end, btinfo);
908 }
909 else
910 {
911 /* We want both begin and end to be inclusive. */
912 btrace_insn_next (&end, 1);
913 }
914
915 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
916 btrace_set_insn_history (btinfo, &begin, &end);
917
918 do_cleanups (uiout_cleanup);
919 }
920
921 /* The to_insn_history_from method of target record-btrace. */
922
923 static void
924 record_btrace_insn_history_from (struct target_ops *self,
925 ULONGEST from, int size, int flags)
926 {
927 ULONGEST begin, end, context;
928
929 context = abs (size);
930 if (context == 0)
931 error (_("Bad record instruction-history-size."));
932
933 if (size < 0)
934 {
935 end = from;
936
937 if (from < context)
938 begin = 0;
939 else
940 begin = from - context + 1;
941 }
942 else
943 {
944 begin = from;
945 end = from + context - 1;
946
947 /* Check for wrap-around. */
948 if (end < begin)
949 end = ULONGEST_MAX;
950 }
951
952 record_btrace_insn_history_range (self, begin, end, flags);
953 }
954
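/* Worked example (editor's addition, not part of the original file): the
   begin/end computation above turns a starting instruction number and a
   signed size into an inclusive range before delegating to
   record_btrace_insn_history_range:

     from = 100, size =  10  ->  begin = 100, end = 109
     from = 100, size = -10  ->  begin =  91, end = 100
     from =   5, size = -10  ->  begin =   0, end =   5

   The same pattern is used for the call history variant further down.  */
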
955 /* Print the instruction number range for a function call history line. */
956
957 static void
958 btrace_call_history_insn_range (struct ui_out *uiout,
959 const struct btrace_function *bfun)
960 {
961 unsigned int begin, end, size;
962
963 size = VEC_length (btrace_insn_s, bfun->insn);
964 gdb_assert (size > 0);
965
966 begin = bfun->insn_offset;
967 end = begin + size - 1;
968
969 ui_out_field_uint (uiout, "insn begin", begin);
970 ui_out_text (uiout, ",");
971 ui_out_field_uint (uiout, "insn end", end);
972 }
973
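/* Worked example (editor's addition, not part of the original file): for a
   function segment with insn_offset == 10 that contains 3 instructions,
   btrace_call_history_insn_range prints "10,12" -- the first and last
   instruction number of that segment, both inclusive.  */
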
974 /* Compute the lowest and highest source line for the instructions in BFUN
975 and return them in PBEGIN and PEND.
976 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
977 result from inlining or macro expansion. */
978
979 static void
980 btrace_compute_src_line_range (const struct btrace_function *bfun,
981 int *pbegin, int *pend)
982 {
983 struct btrace_insn *insn;
984 struct symtab *symtab;
985 struct symbol *sym;
986 unsigned int idx;
987 int begin, end;
988
989 begin = INT_MAX;
990 end = INT_MIN;
991
992 sym = bfun->sym;
993 if (sym == NULL)
994 goto out;
995
996 symtab = symbol_symtab (sym);
997
998 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
999 {
1000 struct symtab_and_line sal;
1001
1002 sal = find_pc_line (insn->pc, 0);
1003 if (sal.symtab != symtab || sal.line == 0)
1004 continue;
1005
1006 begin = min (begin, sal.line);
1007 end = max (end, sal.line);
1008 }
1009
1010 out:
1011 *pbegin = begin;
1012 *pend = end;
1013 }
1014
1015 /* Print the source line information for a function call history line. */
1016
1017 static void
1018 btrace_call_history_src_line (struct ui_out *uiout,
1019 const struct btrace_function *bfun)
1020 {
1021 struct symbol *sym;
1022 int begin, end;
1023
1024 sym = bfun->sym;
1025 if (sym == NULL)
1026 return;
1027
1028 ui_out_field_string (uiout, "file",
1029 symtab_to_filename_for_display (symbol_symtab (sym)));
1030
1031 btrace_compute_src_line_range (bfun, &begin, &end);
1032 if (end < begin)
1033 return;
1034
1035 ui_out_text (uiout, ":");
1036 ui_out_field_int (uiout, "min line", begin);
1037
1038 if (end == begin)
1039 return;
1040
1041 ui_out_text (uiout, ",");
1042 ui_out_field_int (uiout, "max line", end);
1043 }
1044
1045 /* Get the name of a branch trace function. */
1046
1047 static const char *
1048 btrace_get_bfun_name (const struct btrace_function *bfun)
1049 {
1050 struct minimal_symbol *msym;
1051 struct symbol *sym;
1052
1053 if (bfun == NULL)
1054 return "??";
1055
1056 msym = bfun->msym;
1057 sym = bfun->sym;
1058
1059 if (sym != NULL)
1060 return SYMBOL_PRINT_NAME (sym);
1061 else if (msym != NULL)
1062 return MSYMBOL_PRINT_NAME (msym);
1063 else
1064 return "??";
1065 }
1066
1067 /* Print a section of the recorded function call trace. */
1068
1069 static void
1070 btrace_call_history (struct ui_out *uiout,
1071 const struct btrace_thread_info *btinfo,
1072 const struct btrace_call_iterator *begin,
1073 const struct btrace_call_iterator *end,
1074 int int_flags)
1075 {
1076 struct btrace_call_iterator it;
1077 record_print_flags flags = (enum record_print_flag) int_flags;
1078
1079 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1080 btrace_call_number (end));
1081
1082 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1083 {
1084 const struct btrace_function *bfun;
1085 struct minimal_symbol *msym;
1086 struct symbol *sym;
1087
1088 bfun = btrace_call_get (&it);
1089 sym = bfun->sym;
1090 msym = bfun->msym;
1091
1092 /* Print the function index. */
1093 ui_out_field_uint (uiout, "index", bfun->number);
1094 ui_out_text (uiout, "\t");
1095
1096 /* Indicate gaps in the trace. */
1097 if (bfun->errcode != 0)
1098 {
1099 const struct btrace_config *conf;
1100
1101 conf = btrace_conf (btinfo);
1102
1103 /* We have trace so we must have a configuration. */
1104 gdb_assert (conf != NULL);
1105
1106 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1107
1108 continue;
1109 }
1110
1111 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1112 {
1113 int level = bfun->level + btinfo->level, i;
1114
1115 for (i = 0; i < level; ++i)
1116 ui_out_text (uiout, " ");
1117 }
1118
1119 if (sym != NULL)
1120 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
1121 else if (msym != NULL)
1122 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
1123 else if (!ui_out_is_mi_like_p (uiout))
1124 ui_out_field_string (uiout, "function", "??");
1125
1126 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1127 {
1128 ui_out_text (uiout, _("\tinst "));
1129 btrace_call_history_insn_range (uiout, bfun);
1130 }
1131
1132 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1133 {
1134 ui_out_text (uiout, _("\tat "));
1135 btrace_call_history_src_line (uiout, bfun);
1136 }
1137
1138 ui_out_text (uiout, "\n");
1139 }
1140 }
1141
1142 /* The to_call_history method of target record-btrace. */
1143
1144 static void
1145 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1146 {
1147 struct btrace_thread_info *btinfo;
1148 struct btrace_call_history *history;
1149 struct btrace_call_iterator begin, end;
1150 struct cleanup *uiout_cleanup;
1151 struct ui_out *uiout;
1152 unsigned int context, covered;
1153 record_print_flags flags = (enum record_print_flag) int_flags;
1154
1155 uiout = current_uiout;
1156 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1157 "insn history");
1158 context = abs (size);
1159 if (context == 0)
1160 error (_("Bad record function-call-history-size."));
1161
1162 btinfo = require_btrace ();
1163 history = btinfo->call_history;
1164 if (history == NULL)
1165 {
1166 struct btrace_insn_iterator *replay;
1167
1168 DEBUG ("call-history (0x%x): %d", int_flags, size);
1169
1170 /* If we're replaying, we start at the replay position. Otherwise, we
1171 start at the tail of the trace. */
1172 replay = btinfo->replay;
1173 if (replay != NULL)
1174 {
1175 begin.function = replay->function;
1176 begin.btinfo = btinfo;
1177 }
1178 else
1179 btrace_call_end (&begin, btinfo);
1180
1181 /* We start from here and expand in the requested direction. Then we
1182 expand in the other direction, as well, to fill up any remaining
1183 context. */
1184 end = begin;
1185 if (size < 0)
1186 {
1187 /* We want the current position covered, as well. */
1188 covered = btrace_call_next (&end, 1);
1189 covered += btrace_call_prev (&begin, context - covered);
1190 covered += btrace_call_next (&end, context - covered);
1191 }
1192 else
1193 {
1194 covered = btrace_call_next (&end, context);
1195 covered += btrace_call_prev (&begin, context - covered);
1196 }
1197 }
1198 else
1199 {
1200 begin = history->begin;
1201 end = history->end;
1202
1203 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1204 btrace_call_number (&begin), btrace_call_number (&end));
1205
1206 if (size < 0)
1207 {
1208 end = begin;
1209 covered = btrace_call_prev (&begin, context);
1210 }
1211 else
1212 {
1213 begin = end;
1214 covered = btrace_call_next (&end, context);
1215 }
1216 }
1217
1218 if (covered > 0)
1219 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1220 else
1221 {
1222 if (size < 0)
1223 printf_unfiltered (_("At the start of the branch trace record.\n"));
1224 else
1225 printf_unfiltered (_("At the end of the branch trace record.\n"));
1226 }
1227
1228 btrace_set_call_history (btinfo, &begin, &end);
1229 do_cleanups (uiout_cleanup);
1230 }
1231
1232 /* The to_call_history_range method of target record-btrace. */
1233
1234 static void
1235 record_btrace_call_history_range (struct target_ops *self,
1236 ULONGEST from, ULONGEST to,
1237 int int_flags)
1238 {
1239 struct btrace_thread_info *btinfo;
1240 struct btrace_call_history *history;
1241 struct btrace_call_iterator begin, end;
1242 struct cleanup *uiout_cleanup;
1243 struct ui_out *uiout;
1244 unsigned int low, high;
1245 int found;
1246 record_print_flags flags = (enum record_print_flag) int_flags;
1247
1248 uiout = current_uiout;
1249 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1250 "func history");
1251 low = from;
1252 high = to;
1253
1254 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1255
1256 /* Check for wrap-arounds. */
1257 if (low != from || high != to)
1258 error (_("Bad range."));
1259
1260 if (high < low)
1261 error (_("Bad range."));
1262
1263 btinfo = require_btrace ();
1264
1265 found = btrace_find_call_by_number (&begin, btinfo, low);
1266 if (found == 0)
1267 error (_("Range out of bounds."));
1268
1269 found = btrace_find_call_by_number (&end, btinfo, high);
1270 if (found == 0)
1271 {
1272 /* Silently truncate the range. */
1273 btrace_call_end (&end, btinfo);
1274 }
1275 else
1276 {
1277 /* We want both begin and end to be inclusive. */
1278 btrace_call_next (&end, 1);
1279 }
1280
1281 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1282 btrace_set_call_history (btinfo, &begin, &end);
1283
1284 do_cleanups (uiout_cleanup);
1285 }
1286
1287 /* The to_call_history_from method of target record-btrace. */
1288
1289 static void
1290 record_btrace_call_history_from (struct target_ops *self,
1291 ULONGEST from, int size,
1292 int int_flags)
1293 {
1294 ULONGEST begin, end, context;
1295 record_print_flags flags = (enum record_print_flag) int_flags;
1296
1297 context = abs (size);
1298 if (context == 0)
1299 error (_("Bad record function-call-history-size."));
1300
1301 if (size < 0)
1302 {
1303 end = from;
1304
1305 if (from < context)
1306 begin = 0;
1307 else
1308 begin = from - context + 1;
1309 }
1310 else
1311 {
1312 begin = from;
1313 end = from + context - 1;
1314
1315 /* Check for wrap-around. */
1316 if (end < begin)
1317 end = ULONGEST_MAX;
1318 }
1319
1320 record_btrace_call_history_range (self, begin, end, flags);
1321 }
1322
1323 /* The to_record_is_replaying method of target record-btrace. */
1324
1325 static int
1326 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1327 {
1328 struct thread_info *tp;
1329
1330 ALL_NON_EXITED_THREADS (tp)
1331 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1332 return 1;
1333
1334 return 0;
1335 }
1336
1337 /* The to_record_will_replay method of target record-btrace. */
1338
1339 static int
1340 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1341 {
1342 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1343 }
1344
1345 /* The to_xfer_partial method of target record-btrace. */
1346
1347 static enum target_xfer_status
1348 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1349 const char *annex, gdb_byte *readbuf,
1350 const gdb_byte *writebuf, ULONGEST offset,
1351 ULONGEST len, ULONGEST *xfered_len)
1352 {
1353 struct target_ops *t;
1354
1355 /* Filter out requests that don't make sense during replay. */
1356 if (replay_memory_access == replay_memory_access_read_only
1357 && !record_btrace_generating_corefile
1358 && record_btrace_is_replaying (ops, inferior_ptid))
1359 {
1360 switch (object)
1361 {
1362 case TARGET_OBJECT_MEMORY:
1363 {
1364 struct target_section *section;
1365
1366 /* We do not allow writing memory in general. */
1367 if (writebuf != NULL)
1368 {
1369 *xfered_len = len;
1370 return TARGET_XFER_UNAVAILABLE;
1371 }
1372
1373 /* We allow reading readonly memory. */
1374 section = target_section_by_addr (ops, offset);
1375 if (section != NULL)
1376 {
1377 /* Check if the section we found is readonly. */
1378 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1379 section->the_bfd_section)
1380 & SEC_READONLY) != 0)
1381 {
1382 /* Truncate the request to fit into this section. */
1383 len = min (len, section->endaddr - offset);
1384 break;
1385 }
1386 }
1387
1388 *xfered_len = len;
1389 return TARGET_XFER_UNAVAILABLE;
1390 }
1391 }
1392 }
1393
1394 /* Forward the request. */
1395 ops = ops->beneath;
1396 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1397 offset, len, xfered_len);
1398 }
1399
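/* Illustrative behaviour (editor's addition, not part of the original file):
   with the default "read-only" setting, a memory write attempted while
   replaying -- e.g. via "set var" at the CLI -- hits the
   TARGET_OBJECT_MEMORY case above with WRITEBUF != NULL and is answered
   with TARGET_XFER_UNAVAILABLE rather than forwarded.  Reads are only
   forwarded if they fall within a read-only section; everything else is
   likewise reported as unavailable.  */
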
1400 /* The to_insert_breakpoint method of target record-btrace. */
1401
1402 static int
1403 record_btrace_insert_breakpoint (struct target_ops *ops,
1404 struct gdbarch *gdbarch,
1405 struct bp_target_info *bp_tgt)
1406 {
1407 const char *old;
1408 int ret;
1409
1410 /* Inserting breakpoints requires accessing memory. Allow it for the
1411 duration of this function. */
1412 old = replay_memory_access;
1413 replay_memory_access = replay_memory_access_read_write;
1414
1415 ret = 0;
1416 TRY
1417 {
1418 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1419 }
1420 CATCH (except, RETURN_MASK_ALL)
1421 {
1422 replay_memory_access = old;
1423 throw_exception (except);
1424 }
1425 END_CATCH
1426 replay_memory_access = old;
1427
1428 return ret;
1429 }
1430
1431 /* The to_remove_breakpoint method of target record-btrace. */
1432
1433 static int
1434 record_btrace_remove_breakpoint (struct target_ops *ops,
1435 struct gdbarch *gdbarch,
1436 struct bp_target_info *bp_tgt)
1437 {
1438 const char *old;
1439 int ret;
1440
1441 /* Removing breakpoints requires accessing memory. Allow it for the
1442 duration of this function. */
1443 old = replay_memory_access;
1444 replay_memory_access = replay_memory_access_read_write;
1445
1446 ret = 0;
1447 TRY
1448 {
1449 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1450 }
1451 CATCH (except, RETURN_MASK_ALL)
1452 {
1453 replay_memory_access = old;
1454 throw_exception (except);
1455 }
1456 END_CATCH
1457 replay_memory_access = old;
1458
1459 return ret;
1460 }
1461
1462 /* The to_fetch_registers method of target record-btrace. */
1463
1464 static void
1465 record_btrace_fetch_registers (struct target_ops *ops,
1466 struct regcache *regcache, int regno)
1467 {
1468 struct btrace_insn_iterator *replay;
1469 struct thread_info *tp;
1470
1471 tp = find_thread_ptid (inferior_ptid);
1472 gdb_assert (tp != NULL);
1473
1474 replay = tp->btrace.replay;
1475 if (replay != NULL && !record_btrace_generating_corefile)
1476 {
1477 const struct btrace_insn *insn;
1478 struct gdbarch *gdbarch;
1479 int pcreg;
1480
1481 gdbarch = get_regcache_arch (regcache);
1482 pcreg = gdbarch_pc_regnum (gdbarch);
1483 if (pcreg < 0)
1484 return;
1485
1486 /* We can only provide the PC register. */
1487 if (regno >= 0 && regno != pcreg)
1488 return;
1489
1490 insn = btrace_insn_get (replay);
1491 gdb_assert (insn != NULL);
1492
1493 regcache_raw_supply (regcache, regno, &insn->pc);
1494 }
1495 else
1496 {
1497 struct target_ops *t = ops->beneath;
1498
1499 t->to_fetch_registers (t, regcache, regno);
1500 }
1501 }
1502
1503 /* The to_store_registers method of target record-btrace. */
1504
1505 static void
1506 record_btrace_store_registers (struct target_ops *ops,
1507 struct regcache *regcache, int regno)
1508 {
1509 struct target_ops *t;
1510
1511 if (!record_btrace_generating_corefile
1512 && record_btrace_is_replaying (ops, inferior_ptid))
1513 error (_("Cannot write registers while replaying."));
1514
1515 gdb_assert (may_write_registers != 0);
1516
1517 t = ops->beneath;
1518 t->to_store_registers (t, regcache, regno);
1519 }
1520
1521 /* The to_prepare_to_store method of target record-btrace. */
1522
1523 static void
1524 record_btrace_prepare_to_store (struct target_ops *ops,
1525 struct regcache *regcache)
1526 {
1527 struct target_ops *t;
1528
1529 if (!record_btrace_generating_corefile
1530 && record_btrace_is_replaying (ops, inferior_ptid))
1531 return;
1532
1533 t = ops->beneath;
1534 t->to_prepare_to_store (t, regcache);
1535 }
1536
1537 /* The branch trace frame cache. */
1538
1539 struct btrace_frame_cache
1540 {
1541 /* The thread. */
1542 struct thread_info *tp;
1543
1544 /* The frame info. */
1545 struct frame_info *frame;
1546
1547 /* The branch trace function segment. */
1548 const struct btrace_function *bfun;
1549 };
1550
1551 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1552
1553 static htab_t bfcache;
1554
1555 /* hash_f for htab_create_alloc of bfcache. */
1556
1557 static hashval_t
1558 bfcache_hash (const void *arg)
1559 {
1560 const struct btrace_frame_cache *cache
1561 = (const struct btrace_frame_cache *) arg;
1562
1563 return htab_hash_pointer (cache->frame);
1564 }
1565
1566 /* eq_f for htab_create_alloc of bfcache. */
1567
1568 static int
1569 bfcache_eq (const void *arg1, const void *arg2)
1570 {
1571 const struct btrace_frame_cache *cache1
1572 = (const struct btrace_frame_cache *) arg1;
1573 const struct btrace_frame_cache *cache2
1574 = (const struct btrace_frame_cache *) arg2;
1575
1576 return cache1->frame == cache2->frame;
1577 }
1578
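/* Minimal sketch (editor's addition, not part of the original file):
   BFCACHE is a libiberty hash table keyed by the frame_info pointer.  It
   would typically be created along these lines (the exact arguments here
   are an assumption, not the file's actual initialization code):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
                                  xcalloc, xfree);

   Entries are added in bfcache_new below and removed again in
   record_btrace_frame_dealloc_cache.  */
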
1579 /* Create a new btrace frame cache. */
1580
1581 static struct btrace_frame_cache *
1582 bfcache_new (struct frame_info *frame)
1583 {
1584 struct btrace_frame_cache *cache;
1585 void **slot;
1586
1587 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1588 cache->frame = frame;
1589
1590 slot = htab_find_slot (bfcache, cache, INSERT);
1591 gdb_assert (*slot == NULL);
1592 *slot = cache;
1593
1594 return cache;
1595 }
1596
1597 /* Extract the branch trace function from a branch trace frame. */
1598
1599 static const struct btrace_function *
1600 btrace_get_frame_function (struct frame_info *frame)
1601 {
1602 const struct btrace_frame_cache *cache;
1603 const struct btrace_function *bfun;
1604 struct btrace_frame_cache pattern;
1605 void **slot;
1606
1607 pattern.frame = frame;
1608
1609 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1610 if (slot == NULL)
1611 return NULL;
1612
1613 cache = (const struct btrace_frame_cache *) *slot;
1614 return cache->bfun;
1615 }
1616
1617 /* Implement stop_reason method for record_btrace_frame_unwind. */
1618
1619 static enum unwind_stop_reason
1620 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1621 void **this_cache)
1622 {
1623 const struct btrace_frame_cache *cache;
1624 const struct btrace_function *bfun;
1625
1626 cache = (const struct btrace_frame_cache *) *this_cache;
1627 bfun = cache->bfun;
1628 gdb_assert (bfun != NULL);
1629
1630 if (bfun->up == NULL)
1631 return UNWIND_UNAVAILABLE;
1632
1633 return UNWIND_NO_REASON;
1634 }
1635
1636 /* Implement this_id method for record_btrace_frame_unwind. */
1637
1638 static void
1639 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1640 struct frame_id *this_id)
1641 {
1642 const struct btrace_frame_cache *cache;
1643 const struct btrace_function *bfun;
1644 CORE_ADDR code, special;
1645
1646 cache = (const struct btrace_frame_cache *) *this_cache;
1647
1648 bfun = cache->bfun;
1649 gdb_assert (bfun != NULL);
1650
1651 while (bfun->segment.prev != NULL)
1652 bfun = bfun->segment.prev;
1653
1654 code = get_frame_func (this_frame);
1655 special = bfun->number;
1656
1657 *this_id = frame_id_build_unavailable_stack_special (code, special);
1658
1659 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1660 btrace_get_bfun_name (cache->bfun),
1661 core_addr_to_string_nz (this_id->code_addr),
1662 core_addr_to_string_nz (this_id->special_addr));
1663 }
1664
1665 /* Implement prev_register method for record_btrace_frame_unwind. */
1666
1667 static struct value *
1668 record_btrace_frame_prev_register (struct frame_info *this_frame,
1669 void **this_cache,
1670 int regnum)
1671 {
1672 const struct btrace_frame_cache *cache;
1673 const struct btrace_function *bfun, *caller;
1674 const struct btrace_insn *insn;
1675 struct gdbarch *gdbarch;
1676 CORE_ADDR pc;
1677 int pcreg;
1678
1679 gdbarch = get_frame_arch (this_frame);
1680 pcreg = gdbarch_pc_regnum (gdbarch);
1681 if (pcreg < 0 || regnum != pcreg)
1682 throw_error (NOT_AVAILABLE_ERROR,
1683 _("Registers are not available in btrace record history"));
1684
1685 cache = (const struct btrace_frame_cache *) *this_cache;
1686 bfun = cache->bfun;
1687 gdb_assert (bfun != NULL);
1688
1689 caller = bfun->up;
1690 if (caller == NULL)
1691 throw_error (NOT_AVAILABLE_ERROR,
1692 _("No caller in btrace record history"));
1693
1694 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1695 {
1696 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1697 pc = insn->pc;
1698 }
1699 else
1700 {
1701 insn = VEC_last (btrace_insn_s, caller->insn);
1702 pc = insn->pc;
1703
1704 pc += gdb_insn_length (gdbarch, pc);
1705 }
1706
1707 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1708 btrace_get_bfun_name (bfun), bfun->level,
1709 core_addr_to_string_nz (pc));
1710
1711 return frame_unwind_got_address (this_frame, regnum, pc);
1712 }
1713
1714 /* Implement sniffer method for record_btrace_frame_unwind. */
1715
1716 static int
1717 record_btrace_frame_sniffer (const struct frame_unwind *self,
1718 struct frame_info *this_frame,
1719 void **this_cache)
1720 {
1721 const struct btrace_function *bfun;
1722 struct btrace_frame_cache *cache;
1723 struct thread_info *tp;
1724 struct frame_info *next;
1725
1726 /* THIS_FRAME does not contain a reference to its thread. */
1727 tp = find_thread_ptid (inferior_ptid);
1728 gdb_assert (tp != NULL);
1729
1730 bfun = NULL;
1731 next = get_next_frame (this_frame);
1732 if (next == NULL)
1733 {
1734 const struct btrace_insn_iterator *replay;
1735
1736 replay = tp->btrace.replay;
1737 if (replay != NULL)
1738 bfun = replay->function;
1739 }
1740 else
1741 {
1742 const struct btrace_function *callee;
1743
1744 callee = btrace_get_frame_function (next);
1745 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1746 bfun = callee->up;
1747 }
1748
1749 if (bfun == NULL)
1750 return 0;
1751
1752 DEBUG ("[frame] sniffed frame for %s on level %d",
1753 btrace_get_bfun_name (bfun), bfun->level);
1754
1755 /* This is our frame. Initialize the frame cache. */
1756 cache = bfcache_new (this_frame);
1757 cache->tp = tp;
1758 cache->bfun = bfun;
1759
1760 *this_cache = cache;
1761 return 1;
1762 }
1763
1764 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1765
1766 static int
1767 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1768 struct frame_info *this_frame,
1769 void **this_cache)
1770 {
1771 const struct btrace_function *bfun, *callee;
1772 struct btrace_frame_cache *cache;
1773 struct frame_info *next;
1774
1775 next = get_next_frame (this_frame);
1776 if (next == NULL)
1777 return 0;
1778
1779 callee = btrace_get_frame_function (next);
1780 if (callee == NULL)
1781 return 0;
1782
1783 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1784 return 0;
1785
1786 bfun = callee->up;
1787 if (bfun == NULL)
1788 return 0;
1789
1790 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1791 btrace_get_bfun_name (bfun), bfun->level);
1792
1793 /* This is our frame. Initialize the frame cache. */
1794 cache = bfcache_new (this_frame);
1795 cache->tp = find_thread_ptid (inferior_ptid);
1796 cache->bfun = bfun;
1797
1798 *this_cache = cache;
1799 return 1;
1800 }
1801
1802 static void
1803 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1804 {
1805 struct btrace_frame_cache *cache;
1806 void **slot;
1807
1808 cache = (struct btrace_frame_cache *) this_cache;
1809
1810 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1811 gdb_assert (slot != NULL);
1812
1813 htab_remove_elt (bfcache, cache);
1814 }
1815
1816 /* btrace recording does not store previous memory content, nor the contents
1817 of the stack frames.  Any unwinding would return erroneous results as the
1818 stack contents no longer match the changed PC value restored from history.
1819 Therefore this unwinder reports any possibly unwound registers as
1820 <unavailable>. */
1821
1822 const struct frame_unwind record_btrace_frame_unwind =
1823 {
1824 NORMAL_FRAME,
1825 record_btrace_frame_unwind_stop_reason,
1826 record_btrace_frame_this_id,
1827 record_btrace_frame_prev_register,
1828 NULL,
1829 record_btrace_frame_sniffer,
1830 record_btrace_frame_dealloc_cache
1831 };
1832
1833 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1834 {
1835 TAILCALL_FRAME,
1836 record_btrace_frame_unwind_stop_reason,
1837 record_btrace_frame_this_id,
1838 record_btrace_frame_prev_register,
1839 NULL,
1840 record_btrace_tailcall_frame_sniffer,
1841 record_btrace_frame_dealloc_cache
1842 };
1843
1844 /* Implement the to_get_unwinder method. */
1845
1846 static const struct frame_unwind *
1847 record_btrace_to_get_unwinder (struct target_ops *self)
1848 {
1849 return &record_btrace_frame_unwind;
1850 }
1851
1852 /* Implement the to_get_tailcall_unwinder method. */
1853
1854 static const struct frame_unwind *
1855 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1856 {
1857 return &record_btrace_tailcall_frame_unwind;
1858 }
1859
1860 /* Return a human-readable string for FLAG. */
1861
1862 static const char *
1863 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1864 {
1865 switch (flag)
1866 {
1867 case BTHR_STEP:
1868 return "step";
1869
1870 case BTHR_RSTEP:
1871 return "reverse-step";
1872
1873 case BTHR_CONT:
1874 return "cont";
1875
1876 case BTHR_RCONT:
1877 return "reverse-cont";
1878
1879 case BTHR_STOP:
1880 return "stop";
1881 }
1882
1883 return "<invalid>";
1884 }
1885
1886 /* Indicate that TP should be resumed according to FLAG. */
1887
1888 static void
1889 record_btrace_resume_thread (struct thread_info *tp,
1890 enum btrace_thread_flag flag)
1891 {
1892 struct btrace_thread_info *btinfo;
1893
1894 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
1895 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1896
1897 btinfo = &tp->btrace;
1898
1899 /* Fetch the latest branch trace. */
1900 btrace_fetch (tp);
1901
1902 /* A resume request overwrites a preceding resume or stop request. */
1903 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1904 btinfo->flags |= flag;
1905 }
1906
1907 /* Get the current frame for TP. */
1908
1909 static struct frame_info *
1910 get_thread_current_frame (struct thread_info *tp)
1911 {
1912 struct frame_info *frame;
1913 ptid_t old_inferior_ptid;
1914 int executing;
1915
1916 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1917 old_inferior_ptid = inferior_ptid;
1918 inferior_ptid = tp->ptid;
1919
1920 /* Clear the executing flag to allow changes to the current frame.
1921 We are not actually running, yet. We just started a reverse execution
1922 command or a record goto command.
1923 For the latter, EXECUTING is false and this has no effect.
1924 For the former, EXECUTING is true and we're in to_wait, about to
1925 move the thread. Since we need to recompute the stack, we temporarily
1926 set EXECUTING to false. */
1927 executing = is_executing (inferior_ptid);
1928 set_executing (inferior_ptid, 0);
1929
1930 frame = NULL;
1931 TRY
1932 {
1933 frame = get_current_frame ();
1934 }
1935 CATCH (except, RETURN_MASK_ALL)
1936 {
1937 /* Restore the previous execution state. */
1938 set_executing (inferior_ptid, executing);
1939
1940 /* Restore the previous inferior_ptid. */
1941 inferior_ptid = old_inferior_ptid;
1942
1943 throw_exception (except);
1944 }
1945 END_CATCH
1946
1947 /* Restore the previous execution state. */
1948 set_executing (inferior_ptid, executing);
1949
1950 /* Restore the previous inferior_ptid. */
1951 inferior_ptid = old_inferior_ptid;
1952
1953 return frame;
1954 }
1955
1956 /* Start replaying a thread. */
1957
1958 static struct btrace_insn_iterator *
1959 record_btrace_start_replaying (struct thread_info *tp)
1960 {
1961 struct btrace_insn_iterator *replay;
1962 struct btrace_thread_info *btinfo;
1963
1964 btinfo = &tp->btrace;
1965 replay = NULL;
1966
1967 /* We can't start replaying without trace. */
1968 if (btinfo->begin == NULL)
1969 return NULL;
1970
1971 /* GDB stores the current frame_id when stepping in order to detect steps
1972 into subroutines.
1973 Since frames are computed differently when we're replaying, we need to
1974 recompute those stored frames and fix them up so we can still detect
1975 subroutines after we started replaying. */
1976 TRY
1977 {
1978 struct frame_info *frame;
1979 struct frame_id frame_id;
1980 int upd_step_frame_id, upd_step_stack_frame_id;
1981
1982 /* The current frame without replaying - computed via normal unwind. */
1983 frame = get_thread_current_frame (tp);
1984 frame_id = get_frame_id (frame);
1985
1986 /* Check if we need to update any stepping-related frame id's. */
1987 upd_step_frame_id = frame_id_eq (frame_id,
1988 tp->control.step_frame_id);
1989 upd_step_stack_frame_id = frame_id_eq (frame_id,
1990 tp->control.step_stack_frame_id);
1991
1992 /* We start replaying at the end of the branch trace. This corresponds
1993 to the current instruction. */
1994 replay = XNEW (struct btrace_insn_iterator);
1995 btrace_insn_end (replay, btinfo);
1996
1997 /* Skip gaps at the end of the trace. */
1998 while (btrace_insn_get (replay) == NULL)
1999 {
2000 unsigned int steps;
2001
2002 steps = btrace_insn_prev (replay, 1);
2003 if (steps == 0)
2004 error (_("No trace."));
2005 }
2006
2007 /* We're not replaying, yet. */
2008 gdb_assert (btinfo->replay == NULL);
2009 btinfo->replay = replay;
2010
2011 /* Make sure we're not using any stale registers. */
2012 registers_changed_ptid (tp->ptid);
2013
2014 /* The current frame with replaying - computed via btrace unwind. */
2015 frame = get_thread_current_frame (tp);
2016 frame_id = get_frame_id (frame);
2017
2018 /* Replace stepping related frames where necessary. */
2019 if (upd_step_frame_id)
2020 tp->control.step_frame_id = frame_id;
2021 if (upd_step_stack_frame_id)
2022 tp->control.step_stack_frame_id = frame_id;
2023 }
2024 CATCH (except, RETURN_MASK_ALL)
2025 {
2026 xfree (btinfo->replay);
2027 btinfo->replay = NULL;
2028
2029 registers_changed_ptid (tp->ptid);
2030
2031 throw_exception (except);
2032 }
2033 END_CATCH
2034
2035 return replay;
2036 }
2037
2038 /* Stop replaying a thread. */
2039
2040 static void
2041 record_btrace_stop_replaying (struct thread_info *tp)
2042 {
2043 struct btrace_thread_info *btinfo;
2044
2045 btinfo = &tp->btrace;
2046
2047 xfree (btinfo->replay);
2048 btinfo->replay = NULL;
2049
2050 /* Make sure we're not leaving any stale registers. */
2051 registers_changed_ptid (tp->ptid);
2052 }
2053
2054 /* Stop replaying TP if it is at the end of its execution history. */
2055
2056 static void
2057 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2058 {
2059 struct btrace_insn_iterator *replay, end;
2060 struct btrace_thread_info *btinfo;
2061
2062 btinfo = &tp->btrace;
2063 replay = btinfo->replay;
2064
2065 if (replay == NULL)
2066 return;
2067
2068 btrace_insn_end (&end, btinfo);
2069
2070 if (btrace_insn_cmp (replay, &end) == 0)
2071 record_btrace_stop_replaying (tp);
2072 }
2073
2074 /* The to_resume method of target record-btrace. */
2075
2076 static void
2077 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2078 enum gdb_signal signal)
2079 {
2080 struct thread_info *tp;
2081 enum btrace_thread_flag flag, cflag;
2082
2083 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2084 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2085 step ? "step" : "cont");
2086
2087 /* Store the execution direction of the last resume.
2088
2089 If there is more than one to_resume call, we have to rely on infrun
2090 to not change the execution direction in-between. */
2091 record_btrace_resume_exec_dir = execution_direction;
2092
2093 /* As long as we're not replaying, just forward the request.
2094
2095 For non-stop targets this means that no thread is replaying. In order to
2096 make progress, we may need to explicitly move replaying threads to the end
2097 of their execution history. */
2098 if ((execution_direction != EXEC_REVERSE)
2099 && !record_btrace_is_replaying (ops, minus_one_ptid))
2100 {
2101 ops = ops->beneath;
2102 ops->to_resume (ops, ptid, step, signal);
2103 return;
2104 }
2105
2106 /* Compute the btrace thread flag for the requested move. */
2107 if (execution_direction == EXEC_REVERSE)
2108 {
2109 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2110 cflag = BTHR_RCONT;
2111 }
2112 else
2113 {
2114 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2115 cflag = BTHR_CONT;
2116 }
2117
2118 /* We just indicate the resume intent here. The actual stepping happens in
2119 record_btrace_wait below.
2120
2121 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2122 if (!target_is_non_stop_p ())
2123 {
2124 gdb_assert (ptid_match (inferior_ptid, ptid));
2125
2126 ALL_NON_EXITED_THREADS (tp)
2127 if (ptid_match (tp->ptid, ptid))
2128 {
2129 if (ptid_match (tp->ptid, inferior_ptid))
2130 record_btrace_resume_thread (tp, flag);
2131 else
2132 record_btrace_resume_thread (tp, cflag);
2133 }
2134 }
2135 else
2136 {
2137 ALL_NON_EXITED_THREADS (tp)
2138 if (ptid_match (tp->ptid, ptid))
2139 record_btrace_resume_thread (tp, flag);
2140 }
2141
2142 /* Async support. */
2143 if (target_can_async_p ())
2144 {
2145 target_async (1);
2146 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2147 }
2148 }
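
/* As an illustration of the mapping above (a sketch, not an exhaustive
   description of infrun behaviour): a user-level "reverse-stepi" resumes
   the selected thread with BTHR_RSTEP while, on all-stop targets, other
   matching threads get BTHR_RCONT; a plain "continue" maps every matching
   thread to BTHR_CONT.  The flags are consumed in record_btrace_wait.  */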
2149
2150 /* Cancel resuming TP. */
2151
2152 static void
2153 record_btrace_cancel_resume (struct thread_info *tp)
2154 {
2155 enum btrace_thread_flag flags;
2156
2157 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2158 if (flags == 0)
2159 return;
2160
2161 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2162 print_thread_id (tp),
2163 target_pid_to_str (tp->ptid), flags,
2164 btrace_thread_flag_to_str (flags));
2165
2166 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2167 record_btrace_stop_replaying_at_end (tp);
2168 }
2169
2170 /* Return a target_waitstatus indicating that we ran out of history. */
2171
2172 static struct target_waitstatus
2173 btrace_step_no_history (void)
2174 {
2175 struct target_waitstatus status;
2176
2177 status.kind = TARGET_WAITKIND_NO_HISTORY;
2178
2179 return status;
2180 }
2181
2182 /* Return a target_waitstatus indicating that a step finished. */
2183
2184 static struct target_waitstatus
2185 btrace_step_stopped (void)
2186 {
2187 struct target_waitstatus status;
2188
2189 status.kind = TARGET_WAITKIND_STOPPED;
2190 status.value.sig = GDB_SIGNAL_TRAP;
2191
2192 return status;
2193 }
2194
2195 /* Return a target_waitstatus indicating that a thread was stopped as
2196 requested. */
2197
2198 static struct target_waitstatus
2199 btrace_step_stopped_on_request (void)
2200 {
2201 struct target_waitstatus status;
2202
2203 status.kind = TARGET_WAITKIND_STOPPED;
2204 status.value.sig = GDB_SIGNAL_0;
2205
2206 return status;
2207 }
2208
2209 /* Return a target_waitstatus indicating a spurious stop. */
2210
2211 static struct target_waitstatus
2212 btrace_step_spurious (void)
2213 {
2214 struct target_waitstatus status;
2215
2216 status.kind = TARGET_WAITKIND_SPURIOUS;
2217
2218 return status;
2219 }
2220
2221 /* Return a target_waitstatus indicating that the thread was not resumed. */
2222
2223 static struct target_waitstatus
2224 btrace_step_no_resumed (void)
2225 {
2226 struct target_waitstatus status;
2227
2228 status.kind = TARGET_WAITKIND_NO_RESUMED;
2229
2230 return status;
2231 }
2232
2233 /* Return a target_waitstatus indicating that we should wait again. */
2234
2235 static struct target_waitstatus
2236 btrace_step_again (void)
2237 {
2238 struct target_waitstatus status;
2239
2240 status.kind = TARGET_WAITKIND_IGNORE;
2241
2242 return status;
2243 }
2244
2245 /* Clear the record histories. */
2246
2247 static void
2248 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2249 {
2250 xfree (btinfo->insn_history);
2251 xfree (btinfo->call_history);
2252
2253 btinfo->insn_history = NULL;
2254 btinfo->call_history = NULL;
2255 }
2256
2257 /* Check whether TP's current replay position is at a breakpoint. */
2258
2259 static int
2260 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2261 {
2262 struct btrace_insn_iterator *replay;
2263 struct btrace_thread_info *btinfo;
2264 const struct btrace_insn *insn;
2265 struct inferior *inf;
2266
2267 btinfo = &tp->btrace;
2268 replay = btinfo->replay;
2269
2270 if (replay == NULL)
2271 return 0;
2272
2273 insn = btrace_insn_get (replay);
2274 if (insn == NULL)
2275 return 0;
2276
2277 inf = find_inferior_ptid (tp->ptid);
2278 if (inf == NULL)
2279 return 0;
2280
2281 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2282 &btinfo->stop_reason);
2283 }
2284
2285 /* Step one instruction in forward direction. */
2286
2287 static struct target_waitstatus
2288 record_btrace_single_step_forward (struct thread_info *tp)
2289 {
2290 struct btrace_insn_iterator *replay, end;
2291 struct btrace_thread_info *btinfo;
2292
2293 btinfo = &tp->btrace;
2294 replay = btinfo->replay;
2295
2296 /* We're done if we're not replaying. */
2297 if (replay == NULL)
2298 return btrace_step_no_history ();
2299
2300 /* Check if we're stepping a breakpoint. */
2301 if (record_btrace_replay_at_breakpoint (tp))
2302 return btrace_step_stopped ();
2303
2304 /* Skip gaps during replay. */
2305 do
2306 {
2307 unsigned int steps;
2308
2309 /* We will bail out here if we continue stepping after reaching the end
2310 of the execution history. */
2311 steps = btrace_insn_next (replay, 1);
2312 if (steps == 0)
2313 return btrace_step_no_history ();
2314 }
2315 while (btrace_insn_get (replay) == NULL);
2316
2317 /* Determine the end of the instruction trace. */
2318 btrace_insn_end (&end, btinfo);
2319
2320 /* The execution trace contains (and ends with) the current instruction.
2321 This instruction has not been executed, yet, so the trace really ends
2322 one instruction earlier. */
2323 if (btrace_insn_cmp (replay, &end) == 0)
2324 return btrace_step_no_history ();
2325
2326 return btrace_step_spurious ();
2327 }
2328
2329 /* Step one instruction in backward direction. */
2330
2331 static struct target_waitstatus
2332 record_btrace_single_step_backward (struct thread_info *tp)
2333 {
2334 struct btrace_insn_iterator *replay;
2335 struct btrace_thread_info *btinfo;
2336
2337 btinfo = &tp->btrace;
2338 replay = btinfo->replay;
2339
2340 /* Start replaying if we're not already doing so. */
2341 if (replay == NULL)
2342 replay = record_btrace_start_replaying (tp);
2343
2344 /* If we can't step any further, we reached the end of the history.
2345 Skip gaps during replay. */
2346 do
2347 {
2348 unsigned int steps;
2349
2350 steps = btrace_insn_prev (replay, 1);
2351 if (steps == 0)
2352 return btrace_step_no_history ();
2353 }
2354 while (btrace_insn_get (replay) == NULL);
2355
2356 /* Check if we're stepping a breakpoint.
2357
2358 For reverse-stepping, this check is after the step. There is logic in
2359 infrun.c that handles reverse-stepping separately. See, for example,
2360 proceed and adjust_pc_after_break.
2361
2362 This code assumes that for reverse-stepping, PC points to the last
2363 de-executed instruction, whereas for forward-stepping PC points to the
2364 next to-be-executed instruction. */
2365 if (record_btrace_replay_at_breakpoint (tp))
2366 return btrace_step_stopped ();
2367
2368 return btrace_step_spurious ();
2369 }
2370
2371 /* Step a single thread. */
2372
2373 static struct target_waitstatus
2374 record_btrace_step_thread (struct thread_info *tp)
2375 {
2376 struct btrace_thread_info *btinfo;
2377 struct target_waitstatus status;
2378 enum btrace_thread_flag flags;
2379
2380 btinfo = &tp->btrace;
2381
2382 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2383 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2384
2385 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2386 target_pid_to_str (tp->ptid), flags,
2387 btrace_thread_flag_to_str (flags));
2388
2389 /* We can't step without an execution history. */
2390 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2391 return btrace_step_no_history ();
2392
2393 switch (flags)
2394 {
2395 default:
2396 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2397
2398 case BTHR_STOP:
2399 return btrace_step_stopped_on_request ();
2400
2401 case BTHR_STEP:
2402 status = record_btrace_single_step_forward (tp);
2403 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2404 break;
2405
2406 return btrace_step_stopped ();
2407
2408 case BTHR_RSTEP:
2409 status = record_btrace_single_step_backward (tp);
2410 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2411 break;
2412
2413 return btrace_step_stopped ();
2414
2415 case BTHR_CONT:
2416 status = record_btrace_single_step_forward (tp);
2417 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2418 break;
2419
2420 btinfo->flags |= flags;
2421 return btrace_step_again ();
2422
2423 case BTHR_RCONT:
2424 status = record_btrace_single_step_backward (tp);
2425 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2426 break;
2427
2428 btinfo->flags |= flags;
2429 return btrace_step_again ();
2430 }
2431
2432 /* We keep threads moving at the end of their execution history. The to_wait
2433 method will stop the thread for which the event is reported. */
2434 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2435 btinfo->flags |= flags;
2436
2437 return status;
2438 }
2439
2440 /* A vector of threads. */
2441
2442 typedef struct thread_info * tp_t;
2443 DEF_VEC_P (tp_t);
2444
2445 /* Announce further events if necessary. */
2446
2447 static void
2448 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2449 const VEC (tp_t) *no_history)
2450 {
2451 int more_moving, more_no_history;
2452
2453 more_moving = !VEC_empty (tp_t, moving);
2454 more_no_history = !VEC_empty (tp_t, no_history);
2455
2456 if (!more_moving && !more_no_history)
2457 return;
2458
2459 if (more_moving)
2460 DEBUG ("movers pending");
2461
2462 if (more_no_history)
2463 DEBUG ("no-history pending");
2464
2465 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2466 }
2467
2468 /* The to_wait method of target record-btrace. */
2469
2470 static ptid_t
2471 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2472 struct target_waitstatus *status, int options)
2473 {
2474 VEC (tp_t) *moving, *no_history;
2475 struct thread_info *tp, *eventing;
2476 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2477
2478 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2479
2480 /* As long as we're not replaying, just forward the request. */
2481 if ((execution_direction != EXEC_REVERSE)
2482 && !record_btrace_is_replaying (ops, minus_one_ptid))
2483 {
2484 ops = ops->beneath;
2485 return ops->to_wait (ops, ptid, status, options);
2486 }
2487
2488 moving = NULL;
2489 no_history = NULL;
2490
2491 make_cleanup (VEC_cleanup (tp_t), &moving);
2492 make_cleanup (VEC_cleanup (tp_t), &no_history);
2493
2494 /* Keep a work list of moving threads. */
2495 ALL_NON_EXITED_THREADS (tp)
2496 if (ptid_match (tp->ptid, ptid)
2497 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2498 VEC_safe_push (tp_t, moving, tp);
2499
2500 if (VEC_empty (tp_t, moving))
2501 {
2502 *status = btrace_step_no_resumed ();
2503
2504 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2505 target_waitstatus_to_string (status));
2506
2507 do_cleanups (cleanups);
2508 return null_ptid;
2509 }
2510
2511 /* Step moving threads one by one, one step each, until either one thread
2512 reports an event or we run out of threads to step.
2513
2514 When stepping more than one thread, chances are that some threads reach
2515 the end of their execution history earlier than others. If we reported
2516 this immediately, all-stop on top of non-stop would stop all threads and
2517 resume the same threads next time. And we would report the same thread
2518 having reached the end of its execution history again.
2519
2520 In the worst case, this would starve the other threads. But even if other
2521 threads would be allowed to make progress, this would result in far too
2522 many intermediate stops.
2523
2524 We therefore delay the reporting of "no execution history" until we have
2525 nothing else to report. By this time, all threads should have moved to
2526 either the beginning or the end of their execution history. There will
2527 be a single user-visible stop. */
2528 eventing = NULL;
2529 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2530 {
2531 unsigned int ix;
2532
2533 ix = 0;
2534 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2535 {
2536 *status = record_btrace_step_thread (tp);
2537
2538 switch (status->kind)
2539 {
2540 case TARGET_WAITKIND_IGNORE:
2541 ix++;
2542 break;
2543
2544 case TARGET_WAITKIND_NO_HISTORY:
2545 VEC_safe_push (tp_t, no_history,
2546 VEC_ordered_remove (tp_t, moving, ix));
2547 break;
2548
2549 default:
2550 eventing = VEC_unordered_remove (tp_t, moving, ix);
2551 break;
2552 }
2553 }
2554 }
2555
2556 if (eventing == NULL)
2557 {
2558 /* We started with at least one moving thread. This thread must have
2559 either stopped or reached the end of its execution history.
2560
2561 In the former case, EVENTING would not be NULL; since it is NULL here,
2562 we are in the latter case and NO_HISTORY must not be empty. */
2563 gdb_assert (!VEC_empty (tp_t, no_history));
2564
2565 /* We kept threads moving at the end of their execution history. Stop
2566 EVENTING now that we are going to report its stop. */
2567 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2568 eventing->btrace.flags &= ~BTHR_MOVE;
2569
2570 *status = btrace_step_no_history ();
2571 }
2572
2573 gdb_assert (eventing != NULL);
2574
2575 /* We kept threads replaying at the end of their execution history. Stop
2576 replaying EVENTING now that we are going to report its stop. */
2577 record_btrace_stop_replaying_at_end (eventing);
2578
2579 /* Stop all other threads. */
2580 if (!target_is_non_stop_p ())
2581 ALL_NON_EXITED_THREADS (tp)
2582 record_btrace_cancel_resume (tp);
2583
2584 /* In async mode, we need to announce further events. */
2585 if (target_is_async_p ())
2586 record_btrace_maybe_mark_async_event (moving, no_history);
2587
2588 /* Start record histories anew from the current position. */
2589 record_btrace_clear_histories (&eventing->btrace);
2590
2591 /* We moved the replay position but did not update registers. */
2592 registers_changed_ptid (eventing->ptid);
2593
2594 DEBUG ("wait ended by thread %s (%s): %s",
2595 print_thread_id (eventing),
2596 target_pid_to_str (eventing->ptid),
2597 target_waitstatus_to_string (status));
2598
2599 do_cleanups (cleanups);
2600 return eventing->ptid;
2601 }
2602
2603 /* The to_stop method of target record-btrace. */
2604
2605 static void
2606 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2607 {
2608 DEBUG ("stop %s", target_pid_to_str (ptid));
2609
2610 /* As long as we're not replaying, just forward the request. */
2611 if ((execution_direction != EXEC_REVERSE)
2612 && !record_btrace_is_replaying (ops, minus_one_ptid))
2613 {
2614 ops = ops->beneath;
2615 ops->to_stop (ops, ptid);
2616 }
2617 else
2618 {
2619 struct thread_info *tp;
2620
2621 ALL_NON_EXITED_THREADS (tp)
2622 if (ptid_match (tp->ptid, ptid))
2623 {
2624 tp->btrace.flags &= ~BTHR_MOVE;
2625 tp->btrace.flags |= BTHR_STOP;
2626 }
2627 }
2628 }
2629
2630 /* The to_can_execute_reverse method of target record-btrace. */
2631
2632 static int
2633 record_btrace_can_execute_reverse (struct target_ops *self)
2634 {
2635 return 1;
2636 }
2637
2638 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2639
2640 static int
2641 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2642 {
2643 if (record_btrace_is_replaying (ops, minus_one_ptid))
2644 {
2645 struct thread_info *tp = inferior_thread ();
2646
2647 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2648 }
2649
2650 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2651 }
2652
2653 /* The to_supports_stopped_by_sw_breakpoint method of target
2654 record-btrace. */
2655
2656 static int
2657 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2658 {
2659 if (record_btrace_is_replaying (ops, minus_one_ptid))
2660 return 1;
2661
2662 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2663 }
2664
2665 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2666
2667 static int
2668 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2669 {
2670 if (record_btrace_is_replaying (ops, minus_one_ptid))
2671 {
2672 struct thread_info *tp = inferior_thread ();
2673
2674 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2675 }
2676
2677 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2678 }
2679
2680 /* The to_supports_stopped_by_hw_breakpoint method of target
2681 record-btrace. */
2682
2683 static int
2684 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2685 {
2686 if (record_btrace_is_replaying (ops, minus_one_ptid))
2687 return 1;
2688
2689 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2690 }
2691
2692 /* The to_update_thread_list method of target record-btrace. */
2693
2694 static void
2695 record_btrace_update_thread_list (struct target_ops *ops)
2696 {
2697 /* We don't add or remove threads during replay. */
2698 if (record_btrace_is_replaying (ops, minus_one_ptid))
2699 return;
2700
2701 /* Forward the request. */
2702 ops = ops->beneath;
2703 ops->to_update_thread_list (ops);
2704 }
2705
2706 /* The to_thread_alive method of target record-btrace. */
2707
2708 static int
2709 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2710 {
2711 /* We don't add or remove threads during replay. */
2712 if (record_btrace_is_replaying (ops, minus_one_ptid))
2713 return find_thread_ptid (ptid) != NULL;
2714
2715 /* Forward the request. */
2716 ops = ops->beneath;
2717 return ops->to_thread_alive (ops, ptid);
2718 }
2719
2720 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2721 is stopped. */
2722
2723 static void
2724 record_btrace_set_replay (struct thread_info *tp,
2725 const struct btrace_insn_iterator *it)
2726 {
2727 struct btrace_thread_info *btinfo;
2728
2729 btinfo = &tp->btrace;
2730
2731 if (it == NULL || it->function == NULL)
2732 record_btrace_stop_replaying (tp);
2733 else
2734 {
2735 if (btinfo->replay == NULL)
2736 record_btrace_start_replaying (tp);
2737 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2738 return;
2739
2740 *btinfo->replay = *it;
2741 registers_changed_ptid (tp->ptid);
2742 }
2743
2744 /* Start anew from the new replay position. */
2745 record_btrace_clear_histories (btinfo);
2746
2747 stop_pc = regcache_read_pc (get_current_regcache ());
2748 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2749 }
2750
2751 /* The to_goto_record_begin method of target record-btrace. */
2752
2753 static void
2754 record_btrace_goto_begin (struct target_ops *self)
2755 {
2756 struct thread_info *tp;
2757 struct btrace_insn_iterator begin;
2758
2759 tp = require_btrace_thread ();
2760
2761 btrace_insn_begin (&begin, &tp->btrace);
2762 record_btrace_set_replay (tp, &begin);
2763 }
2764
2765 /* The to_goto_record_end method of target record-btrace. */
2766
2767 static void
2768 record_btrace_goto_end (struct target_ops *ops)
2769 {
2770 struct thread_info *tp;
2771
2772 tp = require_btrace_thread ();
2773
2774 record_btrace_set_replay (tp, NULL);
2775 }
2776
2777 /* The to_goto_record method of target record-btrace. */
2778
2779 static void
2780 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2781 {
2782 struct thread_info *tp;
2783 struct btrace_insn_iterator it;
2784 unsigned int number;
2785 int found;
2786
2787 number = insn;
2788
2789 /* Check for wrap-arounds. */
2790 if (number != insn)
2791 error (_("Instruction number out of range."));
2792
2793 tp = require_btrace_thread ();
2794
2795 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2796 if (found == 0)
2797 error (_("No such instruction."));
2798
2799 record_btrace_set_replay (tp, &it);
2800 }
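
/* A usage sketch for the navigation methods above (hypothetical session;
   instruction numbers depend on the recorded trace):

     (gdb) record goto begin
     (gdb) record goto 42
     (gdb) record goto end

   "record goto N" ends up in record_btrace_goto, which validates N and
   moves the replay position via record_btrace_set_replay.  */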
2801
2802 /* The to_record_stop_replaying method of target record-btrace. */
2803
2804 static void
2805 record_btrace_stop_replaying_all (struct target_ops *self)
2806 {
2807 struct thread_info *tp;
2808
2809 ALL_NON_EXITED_THREADS (tp)
2810 record_btrace_stop_replaying (tp);
2811 }
2812
2813 /* The to_execution_direction target method. */
2814
2815 static enum exec_direction_kind
2816 record_btrace_execution_direction (struct target_ops *self)
2817 {
2818 return record_btrace_resume_exec_dir;
2819 }
2820
2821 /* The to_prepare_to_generate_core target method. */
2822
2823 static void
2824 record_btrace_prepare_to_generate_core (struct target_ops *self)
2825 {
2826 record_btrace_generating_corefile = 1;
2827 }
2828
2829 /* The to_done_generating_core target method. */
2830
2831 static void
2832 record_btrace_done_generating_core (struct target_ops *self)
2833 {
2834 record_btrace_generating_corefile = 0;
2835 }
2836
2837 /* Initialize the record-btrace target ops. */
2838
2839 static void
2840 init_record_btrace_ops (void)
2841 {
2842 struct target_ops *ops;
2843
2844 ops = &record_btrace_ops;
2845 ops->to_shortname = "record-btrace";
2846 ops->to_longname = "Branch tracing target";
2847 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2848 ops->to_open = record_btrace_open;
2849 ops->to_close = record_btrace_close;
2850 ops->to_async = record_btrace_async;
2851 ops->to_detach = record_detach;
2852 ops->to_disconnect = record_btrace_disconnect;
2853 ops->to_mourn_inferior = record_mourn_inferior;
2854 ops->to_kill = record_kill;
2855 ops->to_stop_recording = record_btrace_stop_recording;
2856 ops->to_info_record = record_btrace_info;
2857 ops->to_insn_history = record_btrace_insn_history;
2858 ops->to_insn_history_from = record_btrace_insn_history_from;
2859 ops->to_insn_history_range = record_btrace_insn_history_range;
2860 ops->to_call_history = record_btrace_call_history;
2861 ops->to_call_history_from = record_btrace_call_history_from;
2862 ops->to_call_history_range = record_btrace_call_history_range;
2863 ops->to_record_is_replaying = record_btrace_is_replaying;
2864 ops->to_record_will_replay = record_btrace_will_replay;
2865 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2866 ops->to_xfer_partial = record_btrace_xfer_partial;
2867 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2868 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2869 ops->to_fetch_registers = record_btrace_fetch_registers;
2870 ops->to_store_registers = record_btrace_store_registers;
2871 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2872 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2873 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2874 ops->to_resume = record_btrace_resume;
2875 ops->to_wait = record_btrace_wait;
2876 ops->to_stop = record_btrace_stop;
2877 ops->to_update_thread_list = record_btrace_update_thread_list;
2878 ops->to_thread_alive = record_btrace_thread_alive;
2879 ops->to_goto_record_begin = record_btrace_goto_begin;
2880 ops->to_goto_record_end = record_btrace_goto_end;
2881 ops->to_goto_record = record_btrace_goto;
2882 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2883 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2884 ops->to_supports_stopped_by_sw_breakpoint
2885 = record_btrace_supports_stopped_by_sw_breakpoint;
2886 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2887 ops->to_supports_stopped_by_hw_breakpoint
2888 = record_btrace_supports_stopped_by_hw_breakpoint;
2889 ops->to_execution_direction = record_btrace_execution_direction;
2890 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2891 ops->to_done_generating_core = record_btrace_done_generating_core;
2892 ops->to_stratum = record_stratum;
2893 ops->to_magic = OPS_MAGIC;
2894 }
2895
2896 /* Start recording in BTS format. */
2897
2898 static void
2899 cmd_record_btrace_bts_start (char *args, int from_tty)
2900 {
2901 if (args != NULL && *args != 0)
2902 error (_("Invalid argument."));
2903
2904 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2905
2906 TRY
2907 {
2908 execute_command ("target record-btrace", from_tty);
2909 }
2910 CATCH (exception, RETURN_MASK_ALL)
2911 {
2912 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2913 throw_exception (exception);
2914 }
2915 END_CATCH
2916 }
2917
2918 /* Start recording in Intel Processor Trace format. */
2919
2920 static void
2921 cmd_record_btrace_pt_start (char *args, int from_tty)
2922 {
2923 if (args != NULL && *args != 0)
2924 error (_("Invalid argument."));
2925
2926 record_btrace_conf.format = BTRACE_FORMAT_PT;
2927
2928 TRY
2929 {
2930 execute_command ("target record-btrace", from_tty);
2931 }
2932 CATCH (exception, RETURN_MASK_ALL)
2933 {
2934 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2935 throw_exception (exception);
2936 }
2937 END_CATCH
2938 }
2939
2940 /* Start recording; try Intel Processor Trace first, then fall back to BTS. */
2941
2942 static void
2943 cmd_record_btrace_start (char *args, int from_tty)
2944 {
2945 if (args != NULL && *args != 0)
2946 error (_("Invalid argument."));
2947
2948 record_btrace_conf.format = BTRACE_FORMAT_PT;
2949
2950 TRY
2951 {
2952 execute_command ("target record-btrace", from_tty);
2953 }
2954 CATCH (exception, RETURN_MASK_ALL)
2955 {
2956 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2957
2958 TRY
2959 {
2960 execute_command ("target record-btrace", from_tty);
2961 }
2962 CATCH (exception, RETURN_MASK_ALL)
2963 {
2964 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2965 throw_exception (exception);
2966 }
2967 END_CATCH
2968 }
2969 END_CATCH
2970 }
2971
2972 /* The "set record btrace" command. */
2973
2974 static void
2975 cmd_set_record_btrace (char *args, int from_tty)
2976 {
2977 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2978 }
2979
2980 /* The "show record btrace" command. */
2981
2982 static void
2983 cmd_show_record_btrace (char *args, int from_tty)
2984 {
2985 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2986 }
2987
2988 /* The "show record btrace replay-memory-access" command. */
2989
2990 static void
2991 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2992 struct cmd_list_element *c, const char *value)
2993 {
2994 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2995 replay_memory_access);
2996 }
2997
2998 /* The "set record btrace bts" command. */
2999
3000 static void
3001 cmd_set_record_btrace_bts (char *args, int from_tty)
3002 {
3003 printf_unfiltered (_("\"set record btrace bts\" must be followed "
3004 "by an appropriate subcommand.\n"));
3005 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3006 all_commands, gdb_stdout);
3007 }
3008
3009 /* The "show record btrace bts" command. */
3010
3011 static void
3012 cmd_show_record_btrace_bts (char *args, int from_tty)
3013 {
3014 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3015 }
3016
3017 /* The "set record btrace pt" command. */
3018
3019 static void
3020 cmd_set_record_btrace_pt (char *args, int from_tty)
3021 {
3022 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3023 "by an appropriate subcommand.\n"));
3024 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3025 all_commands, gdb_stdout);
3026 }
3027
3028 /* The "show record btrace pt" command. */
3029
3030 static void
3031 cmd_show_record_btrace_pt (char *args, int from_tty)
3032 {
3033 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3034 }
3035
3036 /* The "record bts buffer-size" show value function. */
3037
3038 static void
3039 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3040 struct cmd_list_element *c,
3041 const char *value)
3042 {
3043 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3044 value);
3045 }
3046
3047 /* The "record pt buffer-size" show value function. */
3048
3049 static void
3050 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3051 struct cmd_list_element *c,
3052 const char *value)
3053 {
3054 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3055 value);
3056 }
3057
3058 void _initialize_record_btrace (void);
3059
3060 /* Initialize btrace commands. */
3061
3062 void
3063 _initialize_record_btrace (void)
3064 {
3065 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3066 _("Start branch trace recording."), &record_btrace_cmdlist,
3067 "record btrace ", 0, &record_cmdlist);
3068 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3069
3070 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3071 _("\
3072 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3073 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3074 This format may not be available on all processors."),
3075 &record_btrace_cmdlist);
3076 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3077
3078 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3079 _("\
3080 Start branch trace recording in Intel Processor Trace format.\n\n\
3081 This format may not be available on all processors."),
3082 &record_btrace_cmdlist);
3083 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3084
3085 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3086 _("Set record options"), &set_record_btrace_cmdlist,
3087 "set record btrace ", 0, &set_record_cmdlist);
3088
3089 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3090 _("Show record options"), &show_record_btrace_cmdlist,
3091 "show record btrace ", 0, &show_record_cmdlist);
3092
3093 add_setshow_enum_cmd ("replay-memory-access", no_class,
3094 replay_memory_access_types, &replay_memory_access, _("\
3095 Set what memory accesses are allowed during replay."), _("\
3096 Show what memory accesses are allowed during replay."),
3097 _("Default is READ-ONLY.\n\n\
3098 The btrace record target does not trace data.\n\
3099 The memory therefore corresponds to the live target and not \
3100 to the current replay position.\n\n\
3101 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3102 When READ-WRITE, allow accesses to read-only and read-write memory during \
3103 replay."),
3104 NULL, cmd_show_replay_memory_access,
3105 &set_record_btrace_cmdlist,
3106 &show_record_btrace_cmdlist);
3107
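  /* Usage sketch (hypothetical session; the reply line mirrors
     cmd_show_replay_memory_access above):

       (gdb) set record btrace replay-memory-access read-write
       (gdb) show record btrace replay-memory-access
       Replay memory access is read-write.  */
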
3108 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3109 _("Set record btrace bts options"),
3110 &set_record_btrace_bts_cmdlist,
3111 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3112
3113 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3114 _("Show record btrace bts options"),
3115 &show_record_btrace_bts_cmdlist,
3116 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3117
3118 add_setshow_uinteger_cmd ("buffer-size", no_class,
3119 &record_btrace_conf.bts.size,
3120 _("Set the record/replay bts buffer size."),
3121 _("Show the record/replay bts buffer size."), _("\
3122 When starting recording, request a trace buffer of this size. \
3123 The actual buffer size may differ from the requested size. \
3124 Use \"info record\" to see the actual buffer size.\n\n\
3125 Bigger buffers allow longer recording but also take more time to process \
3126 the recorded execution trace.\n\n\
3127 The trace buffer size may not be changed while recording."), NULL,
3128 show_record_bts_buffer_size_value,
3129 &set_record_btrace_bts_cmdlist,
3130 &show_record_btrace_bts_cmdlist);
3131
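  /* Usage sketch (hypothetical session; the size is illustrative, and the
     reply mirrors show_record_bts_buffer_size_value above):

       (gdb) set record btrace bts buffer-size 131072
       (gdb) show record btrace bts buffer-size
       The record/replay bts buffer size is 131072.  */
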
3132 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3133 _("Set record btrace pt options"),
3134 &set_record_btrace_pt_cmdlist,
3135 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3136
3137 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3138 _("Show record btrace pt options"),
3139 &show_record_btrace_pt_cmdlist,
3140 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3141
3142 add_setshow_uinteger_cmd ("buffer-size", no_class,
3143 &record_btrace_conf.pt.size,
3144 _("Set the record/replay pt buffer size."),
3145 _("Show the record/replay pt buffer size."), _("\
3146 Bigger buffers allow longer recording but also take more time to process \
3147 the recorded execution.\n\
3148 The actual buffer size may differ from the requested size. Use \"info record\" \
3149 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3150 &set_record_btrace_pt_cmdlist,
3151 &show_record_btrace_pt_cmdlist);
3152
3153 init_record_btrace_ops ();
3154 add_target (&record_btrace_ops);
3155
3156 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3157 xcalloc, xfree);
3158
3159 record_btrace_conf.bts.size = 64 * 1024;
3160 record_btrace_conf.pt.size = 16 * 1024;
3161 }
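
/* A minimal end-to-end sketch of how the commands defined above are used
   (hypothetical session; format availability depends on the processor):

     (gdb) start
     (gdb) record btrace
     (gdb) next 10
     (gdb) reverse-stepi
     (gdb) record instruction-history
     (gdb) record goto end  */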