1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40 #include "vec.h"
41
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
44
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
47
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
51 static const char *const replay_memory_access_types[] =
52 {
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56 };
57
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access = replay_memory_access_read_only;
60
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
64
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile;
73
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf;
76
77 /* Command list for "record btrace". */
78 static struct cmd_list_element *record_btrace_cmdlist;
79
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91 #define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
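
/* For example (illustrative only), with "set debug record 1" in effect,

     DEBUG ("resume %s", target_pid_to_str (ptid));

   prints a line like "[record-btrace] resume Thread <id>" to gdb_stdlog.
   With record_debug at zero, the call does nothing.  */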
99
100
101 /* Update the branch trace for the current thread and return a pointer to its
102 thread_info.
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
107 static struct thread_info *
108 require_btrace_thread (void)
109 {
110 struct thread_info *tp;
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
120 if (btrace_is_empty (tp))
121 error (_("No trace."));
122
123 return tp;
124 }
125
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132 static struct btrace_thread_info *
133 require_btrace (void)
134 {
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
140 }
141
142 /* Enable branch tracing for one thread. Warn on errors. */
143
144 static void
145 record_btrace_enable_warn (struct thread_info *tp)
146 {
147 TRY
148 {
149 btrace_enable (tp, &record_btrace_conf);
150 }
151 CATCH (error, RETURN_MASK_ERROR)
152 {
153 warning ("%s", error.message);
154 }
155 END_CATCH
156 }
157
158 /* Callback function to disable branch tracing for one thread. */
159
160 static void
161 record_btrace_disable_callback (void *arg)
162 {
163 struct thread_info *tp = (struct thread_info *) arg;
164
165 btrace_disable (tp);
166 }
167
168 /* Enable automatic tracing of new threads. */
169
170 static void
171 record_btrace_auto_enable (void)
172 {
173 DEBUG ("attach thread observer");
174
175 record_btrace_thread_observer
176 = observer_attach_new_thread (record_btrace_enable_warn);
177 }
178
179 /* Disable automatic tracing of new threads. */
180
181 static void
182 record_btrace_auto_disable (void)
183 {
184 /* The observer may have been detached, already. */
185 if (record_btrace_thread_observer == NULL)
186 return;
187
188 DEBUG ("detach thread observer");
189
190 observer_detach_new_thread (record_btrace_thread_observer);
191 record_btrace_thread_observer = NULL;
192 }
193
194 /* The record-btrace async event handler function. */
195
196 static void
197 record_btrace_handle_async_inferior_event (gdb_client_data data)
198 {
199 inferior_event_handler (INF_REG_EVENT, NULL);
200 }
201
202 /* The to_open method of target record-btrace. */
203
204 static void
205 record_btrace_open (const char *args, int from_tty)
206 {
207 struct cleanup *disable_chain;
208 struct thread_info *tp;
209
210 DEBUG ("open");
211
212 record_preopen ();
213
214 if (!target_has_execution)
215 error (_("The program is not being run."));
216
217 gdb_assert (record_btrace_thread_observer == NULL);
218
219 disable_chain = make_cleanup (null_cleanup, NULL);
220 ALL_NON_EXITED_THREADS (tp)
221 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
222 {
223 btrace_enable (tp, &record_btrace_conf);
224
225 make_cleanup (record_btrace_disable_callback, tp);
226 }
227
228 record_btrace_auto_enable ();
229
230 push_target (&record_btrace_ops);
231
232 record_btrace_async_inferior_event_handler
233 = create_async_event_handler (record_btrace_handle_async_inferior_event,
234 NULL);
235 record_btrace_generating_corefile = 0;
236
237 observer_notify_record_changed (current_inferior (), 1);
238
239 discard_cleanups (disable_chain);
240 }
241
242 /* The to_stop_recording method of target record-btrace. */
243
244 static void
245 record_btrace_stop_recording (struct target_ops *self)
246 {
247 struct thread_info *tp;
248
249 DEBUG ("stop recording");
250
251 record_btrace_auto_disable ();
252
253 ALL_NON_EXITED_THREADS (tp)
254 if (tp->btrace.target != NULL)
255 btrace_disable (tp);
256 }
257
258 /* The to_close method of target record-btrace. */
259
260 static void
261 record_btrace_close (struct target_ops *self)
262 {
263 struct thread_info *tp;
264
265 if (record_btrace_async_inferior_event_handler != NULL)
266 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
267
268 /* Make sure automatic recording gets disabled even if we did not stop
269 recording before closing the record-btrace target. */
270 record_btrace_auto_disable ();
271
272 /* We should have already stopped recording.
273 Tear down btrace in case we have not. */
274 ALL_NON_EXITED_THREADS (tp)
275 btrace_teardown (tp);
276 }
277
278 /* The to_async method of target record-btrace. */
279
280 static void
281 record_btrace_async (struct target_ops *ops, int enable)
282 {
283 if (enable)
284 mark_async_event_handler (record_btrace_async_inferior_event_handler);
285 else
286 clear_async_event_handler (record_btrace_async_inferior_event_handler);
287
288 ops->beneath->to_async (ops->beneath, enable);
289 }
290
291 /* Adjusts the size and returns a human-readable size suffix. */
292
293 static const char *
294 record_btrace_adjust_size (unsigned int *size)
295 {
296 unsigned int sz;
297
298 sz = *size;
299
300 if ((sz & ((1u << 30) - 1)) == 0)
301 {
302 *size = sz >> 30;
303 return "GB";
304 }
305 else if ((sz & ((1u << 20) - 1)) == 0)
306 {
307 *size = sz >> 20;
308 return "MB";
309 }
310 else if ((sz & ((1u << 10) - 1)) == 0)
311 {
312 *size = sz >> 10;
313 return "kB";
314 }
315 else
316 return "";
317 }
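
/* Example (illustrative): 2097152 bytes is an exact number of MB and is
   adjusted to 2 with suffix "MB"; 1572864 bytes is not, so it falls
   through to 1536 with suffix "kB"; 1000 bytes matches no unit and is
   returned unchanged with an empty suffix.  Only exact multiples
   collapse, since each mask tests the low-order bits for zero.  */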
318
319 /* Print a BTS configuration. */
320
321 static void
322 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
323 {
324 const char *suffix;
325 unsigned int size;
326
327 size = conf->size;
328 if (size > 0)
329 {
330 suffix = record_btrace_adjust_size (&size);
331 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
332 }
333 }
334
335 /* Print an Intel(R) Processor Trace configuration. */
336
337 static void
338 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
339 {
340 const char *suffix;
341 unsigned int size;
342
343 size = conf->size;
344 if (size > 0)
345 {
346 suffix = record_btrace_adjust_size (&size);
347 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
348 }
349 }
350
351 /* Print a branch tracing configuration. */
352
353 static void
354 record_btrace_print_conf (const struct btrace_config *conf)
355 {
356 printf_unfiltered (_("Recording format: %s.\n"),
357 btrace_format_string (conf->format));
358
359 switch (conf->format)
360 {
361 case BTRACE_FORMAT_NONE:
362 return;
363
364 case BTRACE_FORMAT_BTS:
365 record_btrace_print_bts_conf (&conf->bts);
366 return;
367
368 case BTRACE_FORMAT_PT:
369 record_btrace_print_pt_conf (&conf->pt);
370 return;
371 }
372
373   internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
374 }
375
376 /* The to_info_record method of target record-btrace. */
377
378 static void
379 record_btrace_info (struct target_ops *self)
380 {
381 struct btrace_thread_info *btinfo;
382 const struct btrace_config *conf;
383 struct thread_info *tp;
384 unsigned int insns, calls, gaps;
385
386 DEBUG ("info");
387
388 tp = find_thread_ptid (inferior_ptid);
389 if (tp == NULL)
390 error (_("No thread."));
391
392 btinfo = &tp->btrace;
393
394 conf = btrace_conf (btinfo);
395 if (conf != NULL)
396 record_btrace_print_conf (conf);
397
398 btrace_fetch (tp);
399
400 insns = 0;
401 calls = 0;
402 gaps = 0;
403
404 if (!btrace_is_empty (tp))
405 {
406 struct btrace_call_iterator call;
407 struct btrace_insn_iterator insn;
408
409 btrace_call_end (&call, btinfo);
410 btrace_call_prev (&call, 1);
411 calls = btrace_call_number (&call);
412
413 btrace_insn_end (&insn, btinfo);
414
415 insns = btrace_insn_number (&insn);
416 if (insns != 0)
417 {
418 /* The last instruction does not really belong to the trace. */
419 insns -= 1;
420 }
421 else
422 {
423 unsigned int steps;
424
425 /* Skip gaps at the end. */
426 do
427 {
428 steps = btrace_insn_prev (&insn, 1);
429 if (steps == 0)
430 break;
431
432 insns = btrace_insn_number (&insn);
433 }
434 while (insns == 0);
435 }
436
437 gaps = btinfo->ngaps;
438 }
439
440 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
441 "for thread %d (%s).\n"), insns, calls, gaps,
442 tp->num, target_pid_to_str (tp->ptid));
443
444 if (btrace_is_replaying (tp))
445 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
446 btrace_insn_number (btinfo->replay));
447 }
448
449 /* Print a decode error. */
450
451 static void
452 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
453 enum btrace_format format)
454 {
455 const char *errstr;
456 int is_error;
457
458 errstr = _("unknown");
459 is_error = 1;
460
461 switch (format)
462 {
463 default:
464 break;
465
466 case BTRACE_FORMAT_BTS:
467 switch (errcode)
468 {
469 default:
470 break;
471
472 case BDE_BTS_OVERFLOW:
473 errstr = _("instruction overflow");
474 break;
475
476 case BDE_BTS_INSN_SIZE:
477 errstr = _("unknown instruction");
478 break;
479 }
480 break;
481
482 #if defined (HAVE_LIBIPT)
483 case BTRACE_FORMAT_PT:
484 switch (errcode)
485 {
486 case BDE_PT_USER_QUIT:
487 is_error = 0;
488 errstr = _("trace decode cancelled");
489 break;
490
491 case BDE_PT_DISABLED:
492 is_error = 0;
493 errstr = _("disabled");
494 break;
495
496 case BDE_PT_OVERFLOW:
497 is_error = 0;
498 errstr = _("overflow");
499 break;
500
501 default:
502 if (errcode < 0)
503 errstr = pt_errstr (pt_errcode (errcode));
504 break;
505 }
506 break;
507 #endif /* defined (HAVE_LIBIPT) */
508 }
509
510 ui_out_text (uiout, _("["));
511 if (is_error)
512 {
513 ui_out_text (uiout, _("decode error ("));
514 ui_out_field_int (uiout, "errcode", errcode);
515 ui_out_text (uiout, _("): "));
516 }
517 ui_out_text (uiout, errstr);
518 ui_out_text (uiout, _("]\n"));
519 }
520
521 /* Print an unsigned int. */
522
523 static void
524 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
525 {
526 ui_out_field_fmt (uiout, fld, "%u", val);
527 }
528
529 /* A range of source lines. */
530
531 struct btrace_line_range
532 {
533 /* The symtab this line is from. */
534 struct symtab *symtab;
535
536 /* The first line (inclusive). */
537 int begin;
538
539 /* The last line (exclusive). */
540 int end;
541 };
542
543 /* Construct a line range. */
544
545 static struct btrace_line_range
546 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
547 {
548 struct btrace_line_range range;
549
550 range.symtab = symtab;
551 range.begin = begin;
552 range.end = end;
553
554 return range;
555 }
556
557 /* Add a line to a line range. */
558
559 static struct btrace_line_range
560 btrace_line_range_add (struct btrace_line_range range, int line)
561 {
562 if (range.end <= range.begin)
563 {
564 /* This is the first entry. */
565 range.begin = line;
566 range.end = line + 1;
567 }
568 else if (line < range.begin)
569 range.begin = line;
570 else if (range.end < line)
571 range.end = line;
572
573 return range;
574 }
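
/* Example (illustrative): starting from the empty range [0; 0), adding
   line 42 yields [42; 43); adding line 40 then extends it to [40; 43).
   Note that for a line above END, END is set to the line itself rather
   than one past it, so the upper bound is effectively treated as
   inclusive from that point on.  */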
575
576 /* Return non-zero if RANGE is empty, zero otherwise. */
577
578 static int
579 btrace_line_range_is_empty (struct btrace_line_range range)
580 {
581 return range.end <= range.begin;
582 }
583
584 /* Return non-zero if LHS contains RHS, zero otherwise. */
585
586 static int
587 btrace_line_range_contains_range (struct btrace_line_range lhs,
588 struct btrace_line_range rhs)
589 {
590 return ((lhs.symtab == rhs.symtab)
591 && (lhs.begin <= rhs.begin)
592 && (rhs.end <= lhs.end));
593 }
594
595 /* Find the line range associated with PC. */
596
597 static struct btrace_line_range
598 btrace_find_line_range (CORE_ADDR pc)
599 {
600 struct btrace_line_range range;
601 struct linetable_entry *lines;
602 struct linetable *ltable;
603 struct symtab *symtab;
604 int nlines, i;
605
606 symtab = find_pc_line_symtab (pc);
607 if (symtab == NULL)
608 return btrace_mk_line_range (NULL, 0, 0);
609
610 ltable = SYMTAB_LINETABLE (symtab);
611 if (ltable == NULL)
612 return btrace_mk_line_range (symtab, 0, 0);
613
614 nlines = ltable->nitems;
615 lines = ltable->item;
616 if (nlines <= 0)
617 return btrace_mk_line_range (symtab, 0, 0);
618
619 range = btrace_mk_line_range (symtab, 0, 0);
620 for (i = 0; i < nlines - 1; i++)
621 {
622 if ((lines[i].pc == pc) && (lines[i].line != 0))
623 range = btrace_line_range_add (range, lines[i].line);
624 }
625
626 return range;
627 }
628
629 /* Print source lines in LINES to UIOUT.
630
631 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
632 instructions corresponding to that source line. When printing a new source
633 line, we do the cleanups for the open chain and open a new cleanup chain for
634 the new source line. If the source line range in LINES is not empty, this
635 function will leave the cleanup chain for the last printed source line open
636 so instructions can be added to it. */
637
638 static void
639 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
640 struct cleanup **ui_item_chain, int flags)
641 {
642 enum print_source_lines_flags psl_flags;
643 int line;
644
645 psl_flags = 0;
646 if (flags & DISASSEMBLY_FILENAME)
647 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
648
649 for (line = lines.begin; line < lines.end; ++line)
650 {
651 if (*ui_item_chain != NULL)
652 do_cleanups (*ui_item_chain);
653
654 *ui_item_chain
655 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
656
657 print_source_lines (lines.symtab, line, line + 1, psl_flags);
658
659 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
660 }
661 }
662
663 /* Disassemble a section of the recorded instruction trace. */
664
665 static void
666 btrace_insn_history (struct ui_out *uiout,
667 const struct btrace_thread_info *btinfo,
668 const struct btrace_insn_iterator *begin,
669 const struct btrace_insn_iterator *end, int flags)
670 {
671 struct ui_file *stb;
672 struct cleanup *cleanups, *ui_item_chain;
673 struct disassemble_info di;
674 struct gdbarch *gdbarch;
675 struct btrace_insn_iterator it;
676 struct btrace_line_range last_lines;
677
678 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
679 btrace_insn_number (end));
680
681 flags |= DISASSEMBLY_SPECULATIVE;
682
683 gdbarch = target_gdbarch ();
684 stb = mem_fileopen ();
685 cleanups = make_cleanup_ui_file_delete (stb);
686 di = gdb_disassemble_info (gdbarch, stb);
687 last_lines = btrace_mk_line_range (NULL, 0, 0);
688
689 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
690
691 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
692 instructions corresponding to that line. */
693 ui_item_chain = NULL;
694
695 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
696 {
697 const struct btrace_insn *insn;
698
699 insn = btrace_insn_get (&it);
700
701 /* A NULL instruction indicates a gap in the trace. */
702 if (insn == NULL)
703 {
704 const struct btrace_config *conf;
705
706 conf = btrace_conf (btinfo);
707
708 /* We have trace so we must have a configuration. */
709 gdb_assert (conf != NULL);
710
711 btrace_ui_out_decode_error (uiout, it.function->errcode,
712 conf->format);
713 }
714 else
715 {
716 struct disasm_insn dinsn;
717
718 if ((flags & DISASSEMBLY_SOURCE) != 0)
719 {
720 struct btrace_line_range lines;
721
722 lines = btrace_find_line_range (insn->pc);
723 if (!btrace_line_range_is_empty (lines)
724 && !btrace_line_range_contains_range (last_lines, lines))
725 {
726 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
727 last_lines = lines;
728 }
729 else if (ui_item_chain == NULL)
730 {
731 ui_item_chain
732 = make_cleanup_ui_out_tuple_begin_end (uiout,
733 "src_and_asm_line");
734 /* No source information. */
735 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
736 }
737
738 gdb_assert (ui_item_chain != NULL);
739 }
740
741 memset (&dinsn, 0, sizeof (dinsn));
742 dinsn.number = btrace_insn_number (&it);
743 dinsn.addr = insn->pc;
744
745 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
746 dinsn.is_speculative = 1;
747
748 gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
749 }
750 }
751
752 do_cleanups (cleanups);
753 }
754
755 /* The to_insn_history method of target record-btrace. */
756
757 static void
758 record_btrace_insn_history (struct target_ops *self, int size, int flags)
759 {
760 struct btrace_thread_info *btinfo;
761 struct btrace_insn_history *history;
762 struct btrace_insn_iterator begin, end;
763 struct cleanup *uiout_cleanup;
764 struct ui_out *uiout;
765 unsigned int context, covered;
766
767 uiout = current_uiout;
768 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
769 "insn history");
770 context = abs (size);
771 if (context == 0)
772 error (_("Bad record instruction-history-size."));
773
774 btinfo = require_btrace ();
775 history = btinfo->insn_history;
776 if (history == NULL)
777 {
778 struct btrace_insn_iterator *replay;
779
780 DEBUG ("insn-history (0x%x): %d", flags, size);
781
782 /* If we're replaying, we start at the replay position. Otherwise, we
783 start at the tail of the trace. */
784 replay = btinfo->replay;
785 if (replay != NULL)
786 begin = *replay;
787 else
788 btrace_insn_end (&begin, btinfo);
789
790 /* We start from here and expand in the requested direction. Then we
791 expand in the other direction, as well, to fill up any remaining
792 context. */
793 end = begin;
794 if (size < 0)
795 {
796 /* We want the current position covered, as well. */
797 covered = btrace_insn_next (&end, 1);
798 covered += btrace_insn_prev (&begin, context - covered);
799 covered += btrace_insn_next (&end, context - covered);
800 }
801 else
802 {
803 covered = btrace_insn_next (&end, context);
804 covered += btrace_insn_prev (&begin, context - covered);
805 }
806 }
807 else
808 {
809 begin = history->begin;
810 end = history->end;
811
812 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
813 btrace_insn_number (&begin), btrace_insn_number (&end));
814
815 if (size < 0)
816 {
817 end = begin;
818 covered = btrace_insn_prev (&begin, context);
819 }
820 else
821 {
822 begin = end;
823 covered = btrace_insn_next (&end, context);
824 }
825 }
826
827 if (covered > 0)
828 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
829 else
830 {
831 if (size < 0)
832 printf_unfiltered (_("At the start of the branch trace record.\n"));
833 else
834 printf_unfiltered (_("At the end of the branch trace record.\n"));
835 }
836
837 btrace_set_insn_history (btinfo, &begin, &end);
838 do_cleanups (uiout_cleanup);
839 }
840
841 /* The to_insn_history_range method of target record-btrace. */
842
843 static void
844 record_btrace_insn_history_range (struct target_ops *self,
845 ULONGEST from, ULONGEST to, int flags)
846 {
847 struct btrace_thread_info *btinfo;
848 struct btrace_insn_history *history;
849 struct btrace_insn_iterator begin, end;
850 struct cleanup *uiout_cleanup;
851 struct ui_out *uiout;
852 unsigned int low, high;
853 int found;
854
855 uiout = current_uiout;
856 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
857 "insn history");
858 low = from;
859 high = to;
860
861 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
862
863 /* Check for wrap-arounds. */
864 if (low != from || high != to)
865 error (_("Bad range."));
866
867 if (high < low)
868 error (_("Bad range."));
869
870 btinfo = require_btrace ();
871
872 found = btrace_find_insn_by_number (&begin, btinfo, low);
873 if (found == 0)
874 error (_("Range out of bounds."));
875
876 found = btrace_find_insn_by_number (&end, btinfo, high);
877 if (found == 0)
878 {
879 /* Silently truncate the range. */
880 btrace_insn_end (&end, btinfo);
881 }
882 else
883 {
884 /* We want both begin and end to be inclusive. */
885 btrace_insn_next (&end, 1);
886 }
887
888 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
889 btrace_set_insn_history (btinfo, &begin, &end);
890
891 do_cleanups (uiout_cleanup);
892 }
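
/* Example (illustrative): "record instruction-history 10,15" reaches the
   function above with FROM = 10 and TO = 15.  BEGIN is positioned at
   instruction 10 and END at instruction 15, then advanced once so the
   half-open iteration [BEGIN; END) still covers instruction 15.  */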
893
894 /* The to_insn_history_from method of target record-btrace. */
895
896 static void
897 record_btrace_insn_history_from (struct target_ops *self,
898 ULONGEST from, int size, int flags)
899 {
900 ULONGEST begin, end, context;
901
902 context = abs (size);
903 if (context == 0)
904 error (_("Bad record instruction-history-size."));
905
906 if (size < 0)
907 {
908 end = from;
909
910 if (from < context)
911 begin = 0;
912 else
913 begin = from - context + 1;
914 }
915 else
916 {
917 begin = from;
918 end = from + context - 1;
919
920 /* Check for wrap-around. */
921 if (end < begin)
922 end = ULONGEST_MAX;
923 }
924
925 record_btrace_insn_history_range (self, begin, end, flags);
926 }
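
/* Example (illustrative): FROM = 10 with SIZE = -3 requests the three
   instructions ending at 10, i.e. the inclusive range [8; 10]; with
   SIZE = 3 it requests [10; 12].  */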
927
928 /* Print the instruction number range for a function call history line. */
929
930 static void
931 btrace_call_history_insn_range (struct ui_out *uiout,
932 const struct btrace_function *bfun)
933 {
934 unsigned int begin, end, size;
935
936 size = VEC_length (btrace_insn_s, bfun->insn);
937 gdb_assert (size > 0);
938
939 begin = bfun->insn_offset;
940 end = begin + size - 1;
941
942 ui_out_field_uint (uiout, "insn begin", begin);
943 ui_out_text (uiout, ",");
944 ui_out_field_uint (uiout, "insn end", end);
945 }
946
947 /* Compute the lowest and highest source line for the instructions in BFUN
948 and return them in PBEGIN and PEND.
949 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
950 result from inlining or macro expansion. */
951
952 static void
953 btrace_compute_src_line_range (const struct btrace_function *bfun,
954 int *pbegin, int *pend)
955 {
956 struct btrace_insn *insn;
957 struct symtab *symtab;
958 struct symbol *sym;
959 unsigned int idx;
960 int begin, end;
961
962 begin = INT_MAX;
963 end = INT_MIN;
964
965 sym = bfun->sym;
966 if (sym == NULL)
967 goto out;
968
969 symtab = symbol_symtab (sym);
970
971 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
972 {
973 struct symtab_and_line sal;
974
975 sal = find_pc_line (insn->pc, 0);
976 if (sal.symtab != symtab || sal.line == 0)
977 continue;
978
979 begin = min (begin, sal.line);
980 end = max (end, sal.line);
981 }
982
983 out:
984 *pbegin = begin;
985 *pend = end;
986 }
987
988 /* Print the source line information for a function call history line. */
989
990 static void
991 btrace_call_history_src_line (struct ui_out *uiout,
992 const struct btrace_function *bfun)
993 {
994 struct symbol *sym;
995 int begin, end;
996
997 sym = bfun->sym;
998 if (sym == NULL)
999 return;
1000
1001 ui_out_field_string (uiout, "file",
1002 symtab_to_filename_for_display (symbol_symtab (sym)));
1003
1004 btrace_compute_src_line_range (bfun, &begin, &end);
1005 if (end < begin)
1006 return;
1007
1008 ui_out_text (uiout, ":");
1009 ui_out_field_int (uiout, "min line", begin);
1010
1011 if (end == begin)
1012 return;
1013
1014 ui_out_text (uiout, ",");
1015 ui_out_field_int (uiout, "max line", end);
1016 }
1017
1018 /* Get the name of a branch trace function. */
1019
1020 static const char *
1021 btrace_get_bfun_name (const struct btrace_function *bfun)
1022 {
1023 struct minimal_symbol *msym;
1024 struct symbol *sym;
1025
1026 if (bfun == NULL)
1027 return "??";
1028
1029 msym = bfun->msym;
1030 sym = bfun->sym;
1031
1032 if (sym != NULL)
1033 return SYMBOL_PRINT_NAME (sym);
1034 else if (msym != NULL)
1035 return MSYMBOL_PRINT_NAME (msym);
1036 else
1037 return "??";
1038 }
1039
1040 /* Print a section of the recorded function call trace. */
1041
1042 static void
1043 btrace_call_history (struct ui_out *uiout,
1044 const struct btrace_thread_info *btinfo,
1045 const struct btrace_call_iterator *begin,
1046 const struct btrace_call_iterator *end,
1047 enum record_print_flag flags)
1048 {
1049 struct btrace_call_iterator it;
1050
1051 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
1052 btrace_call_number (end));
1053
1054 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1055 {
1056 const struct btrace_function *bfun;
1057 struct minimal_symbol *msym;
1058 struct symbol *sym;
1059
1060 bfun = btrace_call_get (&it);
1061 sym = bfun->sym;
1062 msym = bfun->msym;
1063
1064 /* Print the function index. */
1065 ui_out_field_uint (uiout, "index", bfun->number);
1066 ui_out_text (uiout, "\t");
1067
1068 /* Indicate gaps in the trace. */
1069 if (bfun->errcode != 0)
1070 {
1071 const struct btrace_config *conf;
1072
1073 conf = btrace_conf (btinfo);
1074
1075 /* We have trace so we must have a configuration. */
1076 gdb_assert (conf != NULL);
1077
1078 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1079
1080 continue;
1081 }
1082
1083 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1084 {
1085 int level = bfun->level + btinfo->level, i;
1086
1087 for (i = 0; i < level; ++i)
1088 ui_out_text (uiout, " ");
1089 }
1090
1091 if (sym != NULL)
1092 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
1093 else if (msym != NULL)
1094 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
1095 else if (!ui_out_is_mi_like_p (uiout))
1096 ui_out_field_string (uiout, "function", "??");
1097
1098 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1099 {
1100 ui_out_text (uiout, _("\tinst "));
1101 btrace_call_history_insn_range (uiout, bfun);
1102 }
1103
1104 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1105 {
1106 ui_out_text (uiout, _("\tat "));
1107 btrace_call_history_src_line (uiout, bfun);
1108 }
1109
1110 ui_out_text (uiout, "\n");
1111 }
1112 }
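
/* With RECORD_PRINT_INSN_RANGE and RECORD_PRINT_SRC_LINE set, a line
   printed by the loop above might look like (values illustrative):

     13  foo  inst 42,47  at foo.c:10,12  */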
1113
1114 /* The to_call_history method of target record-btrace. */
1115
1116 static void
1117 record_btrace_call_history (struct target_ops *self, int size, int flags)
1118 {
1119 struct btrace_thread_info *btinfo;
1120 struct btrace_call_history *history;
1121 struct btrace_call_iterator begin, end;
1122 struct cleanup *uiout_cleanup;
1123 struct ui_out *uiout;
1124 unsigned int context, covered;
1125
1126 uiout = current_uiout;
1127 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1128 "insn history");
1129 context = abs (size);
1130 if (context == 0)
1131 error (_("Bad record function-call-history-size."));
1132
1133 btinfo = require_btrace ();
1134 history = btinfo->call_history;
1135 if (history == NULL)
1136 {
1137 struct btrace_insn_iterator *replay;
1138
1139 DEBUG ("call-history (0x%x): %d", flags, size);
1140
1141 /* If we're replaying, we start at the replay position. Otherwise, we
1142 start at the tail of the trace. */
1143 replay = btinfo->replay;
1144 if (replay != NULL)
1145 {
1146 begin.function = replay->function;
1147 begin.btinfo = btinfo;
1148 }
1149 else
1150 btrace_call_end (&begin, btinfo);
1151
1152 /* We start from here and expand in the requested direction. Then we
1153 expand in the other direction, as well, to fill up any remaining
1154 context. */
1155 end = begin;
1156 if (size < 0)
1157 {
1158 /* We want the current position covered, as well. */
1159 covered = btrace_call_next (&end, 1);
1160 covered += btrace_call_prev (&begin, context - covered);
1161 covered += btrace_call_next (&end, context - covered);
1162 }
1163 else
1164 {
1165 covered = btrace_call_next (&end, context);
1166           covered += btrace_call_prev (&begin, context - covered);
1167 }
1168 }
1169 else
1170 {
1171 begin = history->begin;
1172 end = history->end;
1173
1174 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
1175 btrace_call_number (&begin), btrace_call_number (&end));
1176
1177 if (size < 0)
1178 {
1179 end = begin;
1180 covered = btrace_call_prev (&begin, context);
1181 }
1182 else
1183 {
1184 begin = end;
1185 covered = btrace_call_next (&end, context);
1186 }
1187 }
1188
1189 if (covered > 0)
1190 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1191 else
1192 {
1193 if (size < 0)
1194 printf_unfiltered (_("At the start of the branch trace record.\n"));
1195 else
1196 printf_unfiltered (_("At the end of the branch trace record.\n"));
1197 }
1198
1199 btrace_set_call_history (btinfo, &begin, &end);
1200 do_cleanups (uiout_cleanup);
1201 }
1202
1203 /* The to_call_history_range method of target record-btrace. */
1204
1205 static void
1206 record_btrace_call_history_range (struct target_ops *self,
1207 ULONGEST from, ULONGEST to, int flags)
1208 {
1209 struct btrace_thread_info *btinfo;
1210 struct btrace_call_history *history;
1211 struct btrace_call_iterator begin, end;
1212 struct cleanup *uiout_cleanup;
1213 struct ui_out *uiout;
1214 unsigned int low, high;
1215 int found;
1216
1217 uiout = current_uiout;
1218 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1219 "func history");
1220 low = from;
1221 high = to;
1222
1223 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
1224
1225 /* Check for wrap-arounds. */
1226 if (low != from || high != to)
1227 error (_("Bad range."));
1228
1229 if (high < low)
1230 error (_("Bad range."));
1231
1232 btinfo = require_btrace ();
1233
1234 found = btrace_find_call_by_number (&begin, btinfo, low);
1235 if (found == 0)
1236 error (_("Range out of bounds."));
1237
1238 found = btrace_find_call_by_number (&end, btinfo, high);
1239 if (found == 0)
1240 {
1241 /* Silently truncate the range. */
1242 btrace_call_end (&end, btinfo);
1243 }
1244 else
1245 {
1246 /* We want both begin and end to be inclusive. */
1247 btrace_call_next (&end, 1);
1248 }
1249
1250 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1251 btrace_set_call_history (btinfo, &begin, &end);
1252
1253 do_cleanups (uiout_cleanup);
1254 }
1255
1256 /* The to_call_history_from method of target record-btrace. */
1257
1258 static void
1259 record_btrace_call_history_from (struct target_ops *self,
1260 ULONGEST from, int size, int flags)
1261 {
1262 ULONGEST begin, end, context;
1263
1264 context = abs (size);
1265 if (context == 0)
1266 error (_("Bad record function-call-history-size."));
1267
1268 if (size < 0)
1269 {
1270 end = from;
1271
1272 if (from < context)
1273 begin = 0;
1274 else
1275 begin = from - context + 1;
1276 }
1277 else
1278 {
1279 begin = from;
1280 end = from + context - 1;
1281
1282 /* Check for wrap-around. */
1283 if (end < begin)
1284 end = ULONGEST_MAX;
1285 }
1286
1287 record_btrace_call_history_range (self, begin, end, flags);
1288 }
1289
1290 /* The to_record_is_replaying method of target record-btrace. */
1291
1292 static int
1293 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1294 {
1295 struct thread_info *tp;
1296
1297 ALL_NON_EXITED_THREADS (tp)
1298 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1299 return 1;
1300
1301 return 0;
1302 }
1303
1304 /* The to_record_will_replay method of target record-btrace. */
1305
1306 static int
1307 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1308 {
1309 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1310 }
1311
1312 /* The to_xfer_partial method of target record-btrace. */
1313
1314 static enum target_xfer_status
1315 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1316 const char *annex, gdb_byte *readbuf,
1317 const gdb_byte *writebuf, ULONGEST offset,
1318 ULONGEST len, ULONGEST *xfered_len)
1319 {
1320 struct target_ops *t;
1321
1322 /* Filter out requests that don't make sense during replay. */
1323 if (replay_memory_access == replay_memory_access_read_only
1324 && !record_btrace_generating_corefile
1325 && record_btrace_is_replaying (ops, inferior_ptid))
1326 {
1327 switch (object)
1328 {
1329 case TARGET_OBJECT_MEMORY:
1330 {
1331 struct target_section *section;
1332
1333 /* We do not allow writing memory in general. */
1334 if (writebuf != NULL)
1335 {
1336 *xfered_len = len;
1337 return TARGET_XFER_UNAVAILABLE;
1338 }
1339
1340 /* We allow reading readonly memory. */
1341 section = target_section_by_addr (ops, offset);
1342 if (section != NULL)
1343 {
1344 /* Check if the section we found is readonly. */
1345 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1346 section->the_bfd_section)
1347 & SEC_READONLY) != 0)
1348 {
1349 /* Truncate the request to fit into this section. */
1350 len = min (len, section->endaddr - offset);
1351 break;
1352 }
1353 }
1354
1355 *xfered_len = len;
1356 return TARGET_XFER_UNAVAILABLE;
1357 }
1358 }
1359 }
1360
1361 /* Forward the request. */
1362 ops = ops->beneath;
1363 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1364 offset, len, xfered_len);
1365 }
1366
1367 /* The to_insert_breakpoint method of target record-btrace. */
1368
1369 static int
1370 record_btrace_insert_breakpoint (struct target_ops *ops,
1371 struct gdbarch *gdbarch,
1372 struct bp_target_info *bp_tgt)
1373 {
1374 const char *old;
1375 int ret;
1376
1377 /* Inserting breakpoints requires accessing memory. Allow it for the
1378 duration of this function. */
1379 old = replay_memory_access;
1380 replay_memory_access = replay_memory_access_read_write;
1381
1382 ret = 0;
1383 TRY
1384 {
1385 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1386 }
1387 CATCH (except, RETURN_MASK_ALL)
1388 {
1389 replay_memory_access = old;
1390 throw_exception (except);
1391 }
1392 END_CATCH
1393 replay_memory_access = old;
1394
1395 return ret;
1396 }
1397
1398 /* The to_remove_breakpoint method of target record-btrace. */
1399
1400 static int
1401 record_btrace_remove_breakpoint (struct target_ops *ops,
1402 struct gdbarch *gdbarch,
1403 struct bp_target_info *bp_tgt)
1404 {
1405 const char *old;
1406 int ret;
1407
1408 /* Removing breakpoints requires accessing memory. Allow it for the
1409 duration of this function. */
1410 old = replay_memory_access;
1411 replay_memory_access = replay_memory_access_read_write;
1412
1413 ret = 0;
1414 TRY
1415 {
1416 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1417 }
1418 CATCH (except, RETURN_MASK_ALL)
1419 {
1420 replay_memory_access = old;
1421 throw_exception (except);
1422 }
1423 END_CATCH
1424 replay_memory_access = old;
1425
1426 return ret;
1427 }
1428
1429 /* The to_fetch_registers method of target record-btrace. */
1430
1431 static void
1432 record_btrace_fetch_registers (struct target_ops *ops,
1433 struct regcache *regcache, int regno)
1434 {
1435 struct btrace_insn_iterator *replay;
1436 struct thread_info *tp;
1437
1438 tp = find_thread_ptid (inferior_ptid);
1439 gdb_assert (tp != NULL);
1440
1441 replay = tp->btrace.replay;
1442 if (replay != NULL && !record_btrace_generating_corefile)
1443 {
1444 const struct btrace_insn *insn;
1445 struct gdbarch *gdbarch;
1446 int pcreg;
1447
1448 gdbarch = get_regcache_arch (regcache);
1449 pcreg = gdbarch_pc_regnum (gdbarch);
1450 if (pcreg < 0)
1451 return;
1452
1453 /* We can only provide the PC register. */
1454 if (regno >= 0 && regno != pcreg)
1455 return;
1456
1457 insn = btrace_insn_get (replay);
1458 gdb_assert (insn != NULL);
1459
1460 regcache_raw_supply (regcache, regno, &insn->pc);
1461 }
1462 else
1463 {
1464 struct target_ops *t = ops->beneath;
1465
1466 t->to_fetch_registers (t, regcache, regno);
1467 }
1468 }
1469
1470 /* The to_store_registers method of target record-btrace. */
1471
1472 static void
1473 record_btrace_store_registers (struct target_ops *ops,
1474 struct regcache *regcache, int regno)
1475 {
1476 struct target_ops *t;
1477
1478 if (!record_btrace_generating_corefile
1479 && record_btrace_is_replaying (ops, inferior_ptid))
1480 error (_("Cannot write registers while replaying."));
1481
1482 gdb_assert (may_write_registers != 0);
1483
1484 t = ops->beneath;
1485 t->to_store_registers (t, regcache, regno);
1486 }
1487
1488 /* The to_prepare_to_store method of target record-btrace. */
1489
1490 static void
1491 record_btrace_prepare_to_store (struct target_ops *ops,
1492 struct regcache *regcache)
1493 {
1494 struct target_ops *t;
1495
1496 if (!record_btrace_generating_corefile
1497 && record_btrace_is_replaying (ops, inferior_ptid))
1498 return;
1499
1500 t = ops->beneath;
1501 t->to_prepare_to_store (t, regcache);
1502 }
1503
1504 /* The branch trace frame cache. */
1505
1506 struct btrace_frame_cache
1507 {
1508 /* The thread. */
1509 struct thread_info *tp;
1510
1511 /* The frame info. */
1512 struct frame_info *frame;
1513
1514 /* The branch trace function segment. */
1515 const struct btrace_function *bfun;
1516 };
1517
1518 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1519
1520 static htab_t bfcache;
1521
1522 /* hash_f for htab_create_alloc of bfcache. */
1523
1524 static hashval_t
1525 bfcache_hash (const void *arg)
1526 {
1527 const struct btrace_frame_cache *cache
1528 = (const struct btrace_frame_cache *) arg;
1529
1530 return htab_hash_pointer (cache->frame);
1531 }
1532
1533 /* eq_f for htab_create_alloc of bfcache. */
1534
1535 static int
1536 bfcache_eq (const void *arg1, const void *arg2)
1537 {
1538 const struct btrace_frame_cache *cache1
1539 = (const struct btrace_frame_cache *) arg1;
1540 const struct btrace_frame_cache *cache2
1541 = (const struct btrace_frame_cache *) arg2;
1542
1543 return cache1->frame == cache2->frame;
1544 }
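
/* The table hashes and compares on the FRAME pointer alone, so a lookup
   only needs a pattern with its frame member set; see
   btrace_get_frame_function below.  */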
1545
1546 /* Create a new btrace frame cache. */
1547
1548 static struct btrace_frame_cache *
1549 bfcache_new (struct frame_info *frame)
1550 {
1551 struct btrace_frame_cache *cache;
1552 void **slot;
1553
1554 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1555 cache->frame = frame;
1556
1557 slot = htab_find_slot (bfcache, cache, INSERT);
1558 gdb_assert (*slot == NULL);
1559 *slot = cache;
1560
1561 return cache;
1562 }
1563
1564 /* Extract the branch trace function from a branch trace frame. */
1565
1566 static const struct btrace_function *
1567 btrace_get_frame_function (struct frame_info *frame)
1568 {
1569 const struct btrace_frame_cache *cache;
1570 const struct btrace_function *bfun;
1571 struct btrace_frame_cache pattern;
1572 void **slot;
1573
1574 pattern.frame = frame;
1575
1576 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1577 if (slot == NULL)
1578 return NULL;
1579
1580 cache = (const struct btrace_frame_cache *) *slot;
1581 return cache->bfun;
1582 }
1583
1584 /* Implement stop_reason method for record_btrace_frame_unwind. */
1585
1586 static enum unwind_stop_reason
1587 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1588 void **this_cache)
1589 {
1590 const struct btrace_frame_cache *cache;
1591 const struct btrace_function *bfun;
1592
1593 cache = (const struct btrace_frame_cache *) *this_cache;
1594 bfun = cache->bfun;
1595 gdb_assert (bfun != NULL);
1596
1597 if (bfun->up == NULL)
1598 return UNWIND_UNAVAILABLE;
1599
1600 return UNWIND_NO_REASON;
1601 }
1602
1603 /* Implement this_id method for record_btrace_frame_unwind. */
1604
1605 static void
1606 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1607 struct frame_id *this_id)
1608 {
1609 const struct btrace_frame_cache *cache;
1610 const struct btrace_function *bfun;
1611 CORE_ADDR code, special;
1612
1613 cache = (const struct btrace_frame_cache *) *this_cache;
1614
1615 bfun = cache->bfun;
1616 gdb_assert (bfun != NULL);
1617
1618 while (bfun->segment.prev != NULL)
1619 bfun = bfun->segment.prev;
1620
1621 code = get_frame_func (this_frame);
1622 special = bfun->number;
1623
1624 *this_id = frame_id_build_unavailable_stack_special (code, special);
1625
1626 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1627 btrace_get_bfun_name (cache->bfun),
1628 core_addr_to_string_nz (this_id->code_addr),
1629 core_addr_to_string_nz (this_id->special_addr));
1630 }
1631
1632 /* Implement prev_register method for record_btrace_frame_unwind. */
1633
1634 static struct value *
1635 record_btrace_frame_prev_register (struct frame_info *this_frame,
1636 void **this_cache,
1637 int regnum)
1638 {
1639 const struct btrace_frame_cache *cache;
1640 const struct btrace_function *bfun, *caller;
1641 const struct btrace_insn *insn;
1642 struct gdbarch *gdbarch;
1643 CORE_ADDR pc;
1644 int pcreg;
1645
1646 gdbarch = get_frame_arch (this_frame);
1647 pcreg = gdbarch_pc_regnum (gdbarch);
1648 if (pcreg < 0 || regnum != pcreg)
1649 throw_error (NOT_AVAILABLE_ERROR,
1650 _("Registers are not available in btrace record history"));
1651
1652 cache = (const struct btrace_frame_cache *) *this_cache;
1653 bfun = cache->bfun;
1654 gdb_assert (bfun != NULL);
1655
1656 caller = bfun->up;
1657 if (caller == NULL)
1658 throw_error (NOT_AVAILABLE_ERROR,
1659 _("No caller in btrace record history"));
1660
1661 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1662 {
1663 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1664 pc = insn->pc;
1665 }
1666 else
1667 {
1668 insn = VEC_last (btrace_insn_s, caller->insn);
1669 pc = insn->pc;
1670
1671 pc += gdb_insn_length (gdbarch, pc);
1672 }
1673
1674 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1675 btrace_get_bfun_name (bfun), bfun->level,
1676 core_addr_to_string_nz (pc));
1677
1678 return frame_unwind_got_address (this_frame, regnum, pc);
1679 }
1680
1681 /* Implement sniffer method for record_btrace_frame_unwind. */
1682
1683 static int
1684 record_btrace_frame_sniffer (const struct frame_unwind *self,
1685 struct frame_info *this_frame,
1686 void **this_cache)
1687 {
1688 const struct btrace_function *bfun;
1689 struct btrace_frame_cache *cache;
1690 struct thread_info *tp;
1691 struct frame_info *next;
1692
1693 /* THIS_FRAME does not contain a reference to its thread. */
1694 tp = find_thread_ptid (inferior_ptid);
1695 gdb_assert (tp != NULL);
1696
1697 bfun = NULL;
1698 next = get_next_frame (this_frame);
1699 if (next == NULL)
1700 {
1701 const struct btrace_insn_iterator *replay;
1702
1703 replay = tp->btrace.replay;
1704 if (replay != NULL)
1705 bfun = replay->function;
1706 }
1707 else
1708 {
1709 const struct btrace_function *callee;
1710
1711 callee = btrace_get_frame_function (next);
1712 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1713 bfun = callee->up;
1714 }
1715
1716 if (bfun == NULL)
1717 return 0;
1718
1719 DEBUG ("[frame] sniffed frame for %s on level %d",
1720 btrace_get_bfun_name (bfun), bfun->level);
1721
1722 /* This is our frame. Initialize the frame cache. */
1723 cache = bfcache_new (this_frame);
1724 cache->tp = tp;
1725 cache->bfun = bfun;
1726
1727 *this_cache = cache;
1728 return 1;
1729 }
1730
1731 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1732
1733 static int
1734 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1735 struct frame_info *this_frame,
1736 void **this_cache)
1737 {
1738 const struct btrace_function *bfun, *callee;
1739 struct btrace_frame_cache *cache;
1740 struct frame_info *next;
1741
1742 next = get_next_frame (this_frame);
1743 if (next == NULL)
1744 return 0;
1745
1746 callee = btrace_get_frame_function (next);
1747 if (callee == NULL)
1748 return 0;
1749
1750 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1751 return 0;
1752
1753 bfun = callee->up;
1754 if (bfun == NULL)
1755 return 0;
1756
1757 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1758 btrace_get_bfun_name (bfun), bfun->level);
1759
1760 /* This is our frame. Initialize the frame cache. */
1761 cache = bfcache_new (this_frame);
1762 cache->tp = find_thread_ptid (inferior_ptid);
1763 cache->bfun = bfun;
1764
1765 *this_cache = cache;
1766 return 1;
1767 }
1768
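/* Implement the dealloc_cache method for the btrace frame unwinders.
   Remove the frame's entry from the bfcache hash table.  */
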
1769 static void
1770 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1771 {
1772 struct btrace_frame_cache *cache;
1773 void **slot;
1774
1775 cache = (struct btrace_frame_cache *) this_cache;
1776
1777 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1778 gdb_assert (slot != NULL);
1779
1780 htab_remove_elt (bfcache, cache);
1781 }
1782
1783 /* btrace recording does not store previous memory content, nor the stack
1784 frames' content.  Any unwinding would return erroneous results as the stack
1785 contents no longer match the changed PC value restored from history.
1786 Therefore this unwinder reports any possibly unwound registers as
1787 <unavailable>. */
1788
1789 const struct frame_unwind record_btrace_frame_unwind =
1790 {
1791 NORMAL_FRAME,
1792 record_btrace_frame_unwind_stop_reason,
1793 record_btrace_frame_this_id,
1794 record_btrace_frame_prev_register,
1795 NULL,
1796 record_btrace_frame_sniffer,
1797 record_btrace_frame_dealloc_cache
1798 };
1799
1800 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1801 {
1802 TAILCALL_FRAME,
1803 record_btrace_frame_unwind_stop_reason,
1804 record_btrace_frame_this_id,
1805 record_btrace_frame_prev_register,
1806 NULL,
1807 record_btrace_tailcall_frame_sniffer,
1808 record_btrace_frame_dealloc_cache
1809 };
1810
1811 /* Implement the to_get_unwinder method. */
1812
1813 static const struct frame_unwind *
1814 record_btrace_to_get_unwinder (struct target_ops *self)
1815 {
1816 return &record_btrace_frame_unwind;
1817 }
1818
1819 /* Implement the to_get_tailcall_unwinder method. */
1820
1821 static const struct frame_unwind *
1822 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1823 {
1824 return &record_btrace_tailcall_frame_unwind;
1825 }
1826
1827 /* Return a human-readable string for FLAG. */
1828
1829 static const char *
1830 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1831 {
1832 switch (flag)
1833 {
1834 case BTHR_STEP:
1835 return "step";
1836
1837 case BTHR_RSTEP:
1838 return "reverse-step";
1839
1840 case BTHR_CONT:
1841 return "cont";
1842
1843 case BTHR_RCONT:
1844 return "reverse-cont";
1845
1846 case BTHR_STOP:
1847 return "stop";
1848 }
1849
1850 return "<invalid>";
1851 }
1852
1853 /* Indicate that TP should be resumed according to FLAG. */
1854
1855 static void
1856 record_btrace_resume_thread (struct thread_info *tp,
1857 enum btrace_thread_flag flag)
1858 {
1859 struct btrace_thread_info *btinfo;
1860
1861 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1862 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1863
1864 btinfo = &tp->btrace;
1865
1866 /* Fetch the latest branch trace. */
1867 btrace_fetch (tp);
1868
1869 /* A resume request overwrites a preceding resume or stop request. */
1870 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1871 btinfo->flags |= flag;
1872 }
1873
1874 /* Get the current frame for TP. */
1875
1876 static struct frame_info *
1877 get_thread_current_frame (struct thread_info *tp)
1878 {
1879 struct frame_info *frame;
1880 ptid_t old_inferior_ptid;
1881 int executing;
1882
1883 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1884 old_inferior_ptid = inferior_ptid;
1885 inferior_ptid = tp->ptid;
1886
1887 /* Clear the executing flag to allow changes to the current frame.
1888 We are not actually running, yet. We just started a reverse execution
1889 command or a record goto command.
1890 For the latter, EXECUTING is false and this has no effect.
1891 For the former, EXECUTING is true and we're in to_wait, about to
1892 move the thread. Since we need to recompute the stack, we temporarily
1893      set EXECUTING to false. */
1894 executing = is_executing (inferior_ptid);
1895 set_executing (inferior_ptid, 0);
1896
1897 frame = NULL;
1898 TRY
1899 {
1900 frame = get_current_frame ();
1901 }
1902 CATCH (except, RETURN_MASK_ALL)
1903 {
1904 /* Restore the previous execution state. */
1905 set_executing (inferior_ptid, executing);
1906
1907 /* Restore the previous inferior_ptid. */
1908 inferior_ptid = old_inferior_ptid;
1909
1910 throw_exception (except);
1911 }
1912 END_CATCH
1913
1914 /* Restore the previous execution state. */
1915 set_executing (inferior_ptid, executing);
1916
1917 /* Restore the previous inferior_ptid. */
1918 inferior_ptid = old_inferior_ptid;
1919
1920 return frame;
1921 }
1922
1923 /* Start replaying a thread. */
1924
1925 static struct btrace_insn_iterator *
1926 record_btrace_start_replaying (struct thread_info *tp)
1927 {
1928 struct btrace_insn_iterator *replay;
1929 struct btrace_thread_info *btinfo;
1930
1931 btinfo = &tp->btrace;
1932 replay = NULL;
1933
1934 /* We can't start replaying without trace. */
1935 if (btinfo->begin == NULL)
1936 return NULL;
1937
1938   /* GDB stores the current frame_id when stepping in order to detect steps
1939 into subroutines.
1940 Since frames are computed differently when we're replaying, we need to
1941 recompute those stored frames and fix them up so we can still detect
1942 subroutines after we started replaying. */
1943 TRY
1944 {
1945 struct frame_info *frame;
1946 struct frame_id frame_id;
1947 int upd_step_frame_id, upd_step_stack_frame_id;
1948
1949 /* The current frame without replaying - computed via normal unwind. */
1950 frame = get_thread_current_frame (tp);
1951 frame_id = get_frame_id (frame);
1952
1953 /* Check if we need to update any stepping-related frame id's. */
1954 upd_step_frame_id = frame_id_eq (frame_id,
1955 tp->control.step_frame_id);
1956 upd_step_stack_frame_id = frame_id_eq (frame_id,
1957 tp->control.step_stack_frame_id);
1958
1959 /* We start replaying at the end of the branch trace. This corresponds
1960 to the current instruction. */
1961 replay = XNEW (struct btrace_insn_iterator);
1962 btrace_insn_end (replay, btinfo);
1963
1964 /* Skip gaps at the end of the trace. */
1965 while (btrace_insn_get (replay) == NULL)
1966 {
1967 unsigned int steps;
1968
1969 steps = btrace_insn_prev (replay, 1);
1970 if (steps == 0)
1971 error (_("No trace."));
1972 }
1973
1974 /* We're not replaying, yet. */
1975 gdb_assert (btinfo->replay == NULL);
1976 btinfo->replay = replay;
1977
1978 /* Make sure we're not using any stale registers. */
1979 registers_changed_ptid (tp->ptid);
1980
1981 /* The current frame with replaying - computed via btrace unwind. */
1982 frame = get_thread_current_frame (tp);
1983 frame_id = get_frame_id (frame);
1984
1985 /* Replace stepping related frames where necessary. */
1986 if (upd_step_frame_id)
1987 tp->control.step_frame_id = frame_id;
1988 if (upd_step_stack_frame_id)
1989 tp->control.step_stack_frame_id = frame_id;
1990 }
1991 CATCH (except, RETURN_MASK_ALL)
1992 {
1993 xfree (btinfo->replay);
1994 btinfo->replay = NULL;
1995
1996 registers_changed_ptid (tp->ptid);
1997
1998 throw_exception (except);
1999 }
2000 END_CATCH
2001
2002 return replay;
2003 }
2004
2005 /* Stop replaying a thread. */
2006
2007 static void
2008 record_btrace_stop_replaying (struct thread_info *tp)
2009 {
2010 struct btrace_thread_info *btinfo;
2011
2012 btinfo = &tp->btrace;
2013
2014 xfree (btinfo->replay);
2015 btinfo->replay = NULL;
2016
2017 /* Make sure we're not leaving any stale registers. */
2018 registers_changed_ptid (tp->ptid);
2019 }
2020
2021 /* Stop replaying TP if it is at the end of its execution history. */
2022
2023 static void
2024 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2025 {
2026 struct btrace_insn_iterator *replay, end;
2027 struct btrace_thread_info *btinfo;
2028
2029 btinfo = &tp->btrace;
2030 replay = btinfo->replay;
2031
2032 if (replay == NULL)
2033 return;
2034
2035 btrace_insn_end (&end, btinfo);
2036
2037 if (btrace_insn_cmp (replay, &end) == 0)
2038 record_btrace_stop_replaying (tp);
2039 }
2040
2041 /* The to_resume method of target record-btrace. */
2042
2043 static void
2044 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2045 enum gdb_signal signal)
2046 {
2047 struct thread_info *tp;
2048 enum btrace_thread_flag flag, cflag;
2049
2050 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2051 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2052 step ? "step" : "cont");
2053
2054 /* Store the execution direction of the last resume.
2055
2056 If there is more than one to_resume call, we have to rely on infrun
2057 to not change the execution direction in-between. */
2058 record_btrace_resume_exec_dir = execution_direction;
2059
2060 /* As long as we're not replaying, just forward the request.
2061
2062 For non-stop targets this means that no thread is replaying. In order to
2063 make progress, we may need to explicitly move replaying threads to the end
2064 of their execution history. */
2065 if ((execution_direction != EXEC_REVERSE)
2066 && !record_btrace_is_replaying (ops, minus_one_ptid))
2067 {
2068 ops = ops->beneath;
2069 ops->to_resume (ops, ptid, step, signal);
2070 return;
2071 }
2072
2073 /* Compute the btrace thread flag for the requested move. */
2074 if (execution_direction == EXEC_REVERSE)
2075 {
2076 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2077 cflag = BTHR_RCONT;
2078 }
2079 else
2080 {
2081 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2082 cflag = BTHR_CONT;
2083 }
2084
2085 /* We just indicate the resume intent here. The actual stepping happens in
2086 record_btrace_wait below.
2087
2088 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2089 if (!target_is_non_stop_p ())
2090 {
2091 gdb_assert (ptid_match (inferior_ptid, ptid));
2092
2093 ALL_NON_EXITED_THREADS (tp)
2094 if (ptid_match (tp->ptid, ptid))
2095 {
2096 if (ptid_match (tp->ptid, inferior_ptid))
2097 record_btrace_resume_thread (tp, flag);
2098 else
2099 record_btrace_resume_thread (tp, cflag);
2100 }
2101 }
2102 else
2103 {
2104 ALL_NON_EXITED_THREADS (tp)
2105 if (ptid_match (tp->ptid, ptid))
2106 record_btrace_resume_thread (tp, flag);
2107 }
2108
2109 /* Async support. */
2110 if (target_can_async_p ())
2111 {
2112 target_async (1);
2113 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2114 }
2115 }
2116
2117 /* Cancel resuming TP. */
2118
2119 static void
2120 record_btrace_cancel_resume (struct thread_info *tp)
2121 {
2122 enum btrace_thread_flag flags;
2123
2124 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2125 if (flags == 0)
2126 return;
2127
2128 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
2129 target_pid_to_str (tp->ptid), flags,
2130 btrace_thread_flag_to_str (flags));
2131
2132 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2133 record_btrace_stop_replaying_at_end (tp);
2134 }
2135
2136 /* Return a target_waitstatus indicating that we ran out of history. */
2137
2138 static struct target_waitstatus
2139 btrace_step_no_history (void)
2140 {
2141 struct target_waitstatus status;
2142
2143 status.kind = TARGET_WAITKIND_NO_HISTORY;
2144
2145 return status;
2146 }
2147
2148 /* Return a target_waitstatus indicating that a step finished. */
2149
2150 static struct target_waitstatus
2151 btrace_step_stopped (void)
2152 {
2153 struct target_waitstatus status;
2154
2155 status.kind = TARGET_WAITKIND_STOPPED;
2156 status.value.sig = GDB_SIGNAL_TRAP;
2157
2158 return status;
2159 }
2160
2161 /* Return a target_waitstatus indicating that a thread was stopped as
2162 requested. */
2163
2164 static struct target_waitstatus
2165 btrace_step_stopped_on_request (void)
2166 {
2167 struct target_waitstatus status;
2168
2169 status.kind = TARGET_WAITKIND_STOPPED;
2170 status.value.sig = GDB_SIGNAL_0;
2171
2172 return status;
2173 }
2174
2175 /* Return a target_waitstatus indicating a spurious stop. */
2176
2177 static struct target_waitstatus
2178 btrace_step_spurious (void)
2179 {
2180 struct target_waitstatus status;
2181
2182 status.kind = TARGET_WAITKIND_SPURIOUS;
2183
2184 return status;
2185 }
2186
2187 /* Return a target_waitstatus indicating that the thread was not resumed. */
2188
2189 static struct target_waitstatus
2190 btrace_step_no_resumed (void)
2191 {
2192 struct target_waitstatus status;
2193
2194 status.kind = TARGET_WAITKIND_NO_RESUMED;
2195
2196 return status;
2197 }
2198
2199 /* Return a target_waitstatus indicating that we should wait again. */
2200
2201 static struct target_waitstatus
2202 btrace_step_again (void)
2203 {
2204 struct target_waitstatus status;
2205
2206 status.kind = TARGET_WAITKIND_IGNORE;
2207
2208 return status;
2209 }
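
/* The helpers above translate stepping outcomes into target_waitstatus
   values. TARGET_WAITKIND_IGNORE is special: it asks the caller to step
   the thread again. An illustrative caller loop (TP is hypothetical):

     struct target_waitstatus status;

     do
       status = record_btrace_step_thread (tp);
     while (status.kind == TARGET_WAITKIND_IGNORE);

   record_btrace_wait below implements a multi-thread variant of this
   loop. */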
2210
2211 /* Clear the record histories. */
2212
2213 static void
2214 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2215 {
2216 xfree (btinfo->insn_history);
2217 xfree (btinfo->call_history);
2218
2219 btinfo->insn_history = NULL;
2220 btinfo->call_history = NULL;
2221 }
2222
2223 /* Check whether TP's current replay position is at a breakpoint. */
2224
2225 static int
2226 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2227 {
2228 struct btrace_insn_iterator *replay;
2229 struct btrace_thread_info *btinfo;
2230 const struct btrace_insn *insn;
2231 struct inferior *inf;
2232
2233 btinfo = &tp->btrace;
2234 replay = btinfo->replay;
2235
2236 if (replay == NULL)
2237 return 0;
2238
2239 insn = btrace_insn_get (replay);
2240 if (insn == NULL)
2241 return 0;
2242
2243 inf = find_inferior_ptid (tp->ptid);
2244 if (inf == NULL)
2245 return 0;
2246
2247 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2248 &btinfo->stop_reason);
2249 }
2250
2251 /* Step one instruction in forward direction. */
2252
2253 static struct target_waitstatus
2254 record_btrace_single_step_forward (struct thread_info *tp)
2255 {
2256 struct btrace_insn_iterator *replay, end;
2257 struct btrace_thread_info *btinfo;
2258
2259 btinfo = &tp->btrace;
2260 replay = btinfo->replay;
2261
2262 /* We're done if we're not replaying. */
2263 if (replay == NULL)
2264 return btrace_step_no_history ();
2265
2266 /* Check if we're stepping a breakpoint. */
2267 if (record_btrace_replay_at_breakpoint (tp))
2268 return btrace_step_stopped ();
2269
2270 /* Skip gaps during replay. */
2271 do
2272 {
2273 unsigned int steps;
2274
2275 /* We will bail out here if we continue stepping after reaching the end
2276 of the execution history. */
2277 steps = btrace_insn_next (replay, 1);
2278 if (steps == 0)
2279 return btrace_step_no_history ();
2280 }
2281 while (btrace_insn_get (replay) == NULL);
2282
2283 /* Determine the end of the instruction trace. */
2284 btrace_insn_end (&end, btinfo);
2285
2286 /* The execution trace contains (and ends with) the current instruction.
2287 This instruction has not been executed yet, so the trace really ends
2288 one instruction earlier. */
2289 if (btrace_insn_cmp (replay, &end) == 0)
2290 return btrace_step_no_history ();
2291
2292 return btrace_step_spurious ();
2293 }
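
/* A gap is an iterator position for which btrace_insn_get returns NULL,
   e.g. where parts of the trace could not be decoded. Both single-step
   functions skip such positions so that replay only ever stops at an
   actual instruction. */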
2294
2295 /* Step one instruction in backward direction. */
2296
2297 static struct target_waitstatus
2298 record_btrace_single_step_backward (struct thread_info *tp)
2299 {
2300 struct btrace_insn_iterator *replay;
2301 struct btrace_thread_info *btinfo;
2302
2303 btinfo = &tp->btrace;
2304 replay = btinfo->replay;
2305
2306 /* Start replaying if we're not already doing so. */
2307 if (replay == NULL)
2308 replay = record_btrace_start_replaying (tp);
2309
2310 /* If we can't step any further, we reached the beginning of the execution
2311 history. Skip gaps during replay. */
2312 do
2313 {
2314 unsigned int steps;
2315
2316 steps = btrace_insn_prev (replay, 1);
2317 if (steps == 0)
2318 return btrace_step_no_history ();
2319 }
2320 while (btrace_insn_get (replay) == NULL);
2321
2322 /* Check if we're stepping a breakpoint.
2323
2324 For reverse-stepping, this check is after the step. There is logic in
2325 infrun.c that handles reverse-stepping separately. See, for example,
2326 proceed and adjust_pc_after_break.
2327
2328 This code assumes that for reverse-stepping, PC points to the last
2329 de-executed instruction, whereas for forward-stepping PC points to the
2330 next to-be-executed instruction. */
2331 if (record_btrace_replay_at_breakpoint (tp))
2332 return btrace_step_stopped ();
2333
2334 return btrace_step_spurious ();
2335 }
2336
2337 /* Step a single thread. */
2338
2339 static struct target_waitstatus
2340 record_btrace_step_thread (struct thread_info *tp)
2341 {
2342 struct btrace_thread_info *btinfo;
2343 struct target_waitstatus status;
2344 enum btrace_thread_flag flags;
2345
2346 btinfo = &tp->btrace;
2347
2348 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2349 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2350
2351 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2352 target_pid_to_str (tp->ptid), flags,
2353 btrace_thread_flag_to_str (flags));
2354
2355 /* We can't step without an execution history. */
2356 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2357 return btrace_step_no_history ();
2358
2359 switch (flags)
2360 {
2361 default:
2362 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2363
2364 case BTHR_STOP:
2365 return btrace_step_stopped_on_request ();
2366
2367 case BTHR_STEP:
2368 status = record_btrace_single_step_forward (tp);
2369 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2370 break;
2371
2372 return btrace_step_stopped ();
2373
2374 case BTHR_RSTEP:
2375 status = record_btrace_single_step_backward (tp);
2376 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2377 break;
2378
2379 return btrace_step_stopped ();
2380
2381 case BTHR_CONT:
2382 status = record_btrace_single_step_forward (tp);
2383 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2384 break;
2385
2386 btinfo->flags |= flags;
2387 return btrace_step_again ();
2388
2389 case BTHR_RCONT:
2390 status = record_btrace_single_step_backward (tp);
2391 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2392 break;
2393
2394 btinfo->flags |= flags;
2395 return btrace_step_again ();
2396 }
2397
2398 /* We keep threads moving at the end of their execution history. The to_wait
2399 method will stop the thread for which the event is reported. */
2400 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2401 btinfo->flags |= flags;
2402
2403 return status;
2404 }
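
/* To summarize the stepping logic above: BTHR_STEP and BTHR_RSTEP report
   TARGET_WAITKIND_STOPPED after a single instruction; BTHR_CONT and
   BTHR_RCONT request another step via TARGET_WAITKIND_IGNORE until a
   breakpoint is reached; and running out of trace in either direction
   yields TARGET_WAITKIND_NO_HISTORY. */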
2405
2406 /* A vector of threads. */
2407
2408 typedef struct thread_info * tp_t;
2409 DEF_VEC_P (tp_t);
2410
2411 /* Announce further events if necessary. */
2412
2413 static void
2414 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2415 const VEC (tp_t) *no_history)
2416 {
2417 int more_moving, more_no_history;
2418
2419 more_moving = !VEC_empty (tp_t, moving);
2420 more_no_history = !VEC_empty (tp_t, no_history);
2421
2422 if (!more_moving && !more_no_history)
2423 return;
2424
2425 if (more_moving)
2426 DEBUG ("movers pending");
2427
2428 if (more_no_history)
2429 DEBUG ("no-history pending");
2430
2431 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2432 }
2433
2434 /* The to_wait method of target record-btrace. */
2435
2436 static ptid_t
2437 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2438 struct target_waitstatus *status, int options)
2439 {
2440 VEC (tp_t) *moving, *no_history;
2441 struct thread_info *tp, *eventing;
2442 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2443
2444 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2445
2446 /* As long as we're not replaying, just forward the request. */
2447 if ((execution_direction != EXEC_REVERSE)
2448 && !record_btrace_is_replaying (ops, minus_one_ptid))
2449 {
2450 ops = ops->beneath;
2451 return ops->to_wait (ops, ptid, status, options);
2452 }
2453
2454 moving = NULL;
2455 no_history = NULL;
2456
2457 make_cleanup (VEC_cleanup (tp_t), &moving);
2458 make_cleanup (VEC_cleanup (tp_t), &no_history);
2459
2460 /* Keep a work list of moving threads. */
2461 ALL_NON_EXITED_THREADS (tp)
2462 if (ptid_match (tp->ptid, ptid)
2463 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2464 VEC_safe_push (tp_t, moving, tp);
2465
2466 if (VEC_empty (tp_t, moving))
2467 {
2468 *status = btrace_step_no_resumed ();
2469
2470 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2471 target_waitstatus_to_string (status));
2472
2473 do_cleanups (cleanups);
2474 return null_ptid;
2475 }
2476
2477 /* Step moving threads one by one, one step each, until either one thread
2478 reports an event or we run out of threads to step.
2479
2480 When stepping more than one thread, chances are that some threads reach
2481 the end of their execution history earlier than others. If we reported
2482 this immediately, all-stop on top of non-stop would stop all threads and
2483 resume the same threads next time. And we would report the same thread
2484 having reached the end of its execution history again.
2485
2486 In the worst case, this would starve the other threads. But even if other
2487 threads were allowed to make progress, this would result in far too many
2488 intermediate stops.
2489
2490 We therefore delay the reporting of "no execution history" until we have
2491 nothing else to report. By this time, all threads should have moved to
2492 either the beginning or the end of their execution history. There will
2493 be a single user-visible stop. */
2494 eventing = NULL;
2495 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2496 {
2497 unsigned int ix;
2498
2499 ix = 0;
2500 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2501 {
2502 *status = record_btrace_step_thread (tp);
2503
2504 switch (status->kind)
2505 {
2506 case TARGET_WAITKIND_IGNORE:
2507 ix++;
2508 break;
2509
2510 case TARGET_WAITKIND_NO_HISTORY:
2511 VEC_safe_push (tp_t, no_history,
2512 VEC_ordered_remove (tp_t, moving, ix));
2513 break;
2514
2515 default:
2516 eventing = VEC_unordered_remove (tp_t, moving, ix);
2517 break;
2518 }
2519 }
2520 }
2521
2522 if (eventing == NULL)
2523 {
2524 /* We started with at least one moving thread. This thread must have
2525 either stopped or reached the end of its execution history.
2526
2527 In the former case, EVENTING must not be NULL.
2528 In the latter case, NO_HISTORY must not be empty. */
2529 gdb_assert (!VEC_empty (tp_t, no_history));
2530
2531 /* We kept threads moving at the end of their execution history. Stop
2532 EVENTING now that we are going to report its stop. */
2533 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2534 eventing->btrace.flags &= ~BTHR_MOVE;
2535
2536 *status = btrace_step_no_history ();
2537 }
2538
2539 gdb_assert (eventing != NULL);
2540
2541 /* We kept threads replaying at the end of their execution history. Stop
2542 replaying EVENTING now that we are going to report its stop. */
2543 record_btrace_stop_replaying_at_end (eventing);
2544
2545 /* Stop all other threads. */
2546 if (!target_is_non_stop_p ())
2547 ALL_NON_EXITED_THREADS (tp)
2548 record_btrace_cancel_resume (tp);
2549
2550 /* In async mode, we need to announce further events. */
2551 if (target_is_async_p ())
2552 record_btrace_maybe_mark_async_event (moving, no_history);
2553
2554 /* Start record histories anew from the current position. */
2555 record_btrace_clear_histories (&eventing->btrace);
2556
2557 /* We moved the replay position but did not update registers. */
2558 registers_changed_ptid (eventing->ptid);
2559
2560 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2561 target_pid_to_str (eventing->ptid),
2562 target_waitstatus_to_string (status));
2563
2564 do_cleanups (cleanups);
2565 return eventing->ptid;
2566 }
2567
2568 /* The to_stop method of target record-btrace. */
2569
2570 static void
2571 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2572 {
2573 DEBUG ("stop %s", target_pid_to_str (ptid));
2574
2575 /* As long as we're not replaying, just forward the request. */
2576 if ((execution_direction != EXEC_REVERSE)
2577 && !record_btrace_is_replaying (ops, minus_one_ptid))
2578 {
2579 ops = ops->beneath;
2580 ops->to_stop (ops, ptid);
2581 }
2582 else
2583 {
2584 struct thread_info *tp;
2585
2586 ALL_NON_EXITED_THREADS (tp)
2587 if (ptid_match (tp->ptid, ptid))
2588 {
2589 tp->btrace.flags &= ~BTHR_MOVE;
2590 tp->btrace.flags |= BTHR_STOP;
2591 }
2592 }
2593 }
2594
2595 /* The to_can_execute_reverse method of target record-btrace. */
2596
2597 static int
2598 record_btrace_can_execute_reverse (struct target_ops *self)
2599 {
2600 return 1;
2601 }
2602
2603 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2604
2605 static int
2606 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2607 {
2608 if (record_btrace_is_replaying (ops, minus_one_ptid))
2609 {
2610 struct thread_info *tp = inferior_thread ();
2611
2612 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2613 }
2614
2615 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2616 }
2617
2618 /* The to_supports_stopped_by_sw_breakpoint method of target
2619 record-btrace. */
2620
2621 static int
2622 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2623 {
2624 if (record_btrace_is_replaying (ops, minus_one_ptid))
2625 return 1;
2626
2627 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2628 }
2629
2630 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2631
2632 static int
2633 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2634 {
2635 if (record_btrace_is_replaying (ops, minus_one_ptid))
2636 {
2637 struct thread_info *tp = inferior_thread ();
2638
2639 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2640 }
2641
2642 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2643 }
2644
2645 /* The to_supports_stopped_by_hw_breakpoint method of target
2646 record-btrace. */
2647
2648 static int
2649 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2650 {
2651 if (record_btrace_is_replaying (ops, minus_one_ptid))
2652 return 1;
2653
2654 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2655 }
2656
2657 /* The to_update_thread_list method of target record-btrace. */
2658
2659 static void
2660 record_btrace_update_thread_list (struct target_ops *ops)
2661 {
2662 /* We don't add or remove threads during replay. */
2663 if (record_btrace_is_replaying (ops, minus_one_ptid))
2664 return;
2665
2666 /* Forward the request. */
2667 ops = ops->beneath;
2668 ops->to_update_thread_list (ops);
2669 }
2670
2671 /* The to_thread_alive method of target record-btrace. */
2672
2673 static int
2674 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2675 {
2676 /* We don't add or remove threads during replay. */
2677 if (record_btrace_is_replaying (ops, minus_one_ptid))
2678 return find_thread_ptid (ptid) != NULL;
2679
2680 /* Forward the request. */
2681 ops = ops->beneath;
2682 return ops->to_thread_alive (ops, ptid);
2683 }
2684
2685 /* Set the replay branch trace instruction iterator. If IT is NULL or
2686 invalid, replay is stopped. */
2687
2688 static void
2689 record_btrace_set_replay (struct thread_info *tp,
2690 const struct btrace_insn_iterator *it)
2691 {
2692 struct btrace_thread_info *btinfo;
2693
2694 btinfo = &tp->btrace;
2695
2696 if (it == NULL || it->function == NULL)
2697 record_btrace_stop_replaying (tp);
2698 else
2699 {
2700 if (btinfo->replay == NULL)
2701 record_btrace_start_replaying (tp);
2702 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2703 return;
2704
2705 *btinfo->replay = *it;
2706 registers_changed_ptid (tp->ptid);
2707 }
2708
2709 /* Start anew from the new replay position. */
2710 record_btrace_clear_histories (btinfo);
2711
2712 stop_pc = regcache_read_pc (get_current_regcache ());
2713 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2714 }
2715
2716 /* The to_goto_record_begin method of target record-btrace. */
2717
2718 static void
2719 record_btrace_goto_begin (struct target_ops *self)
2720 {
2721 struct thread_info *tp;
2722 struct btrace_insn_iterator begin;
2723
2724 tp = require_btrace_thread ();
2725
2726 btrace_insn_begin (&begin, &tp->btrace);
2727 record_btrace_set_replay (tp, &begin);
2728 }
2729
2730 /* The to_goto_record_end method of target record-btrace. */
2731
2732 static void
2733 record_btrace_goto_end (struct target_ops *ops)
2734 {
2735 struct thread_info *tp;
2736
2737 tp = require_btrace_thread ();
2738
2739 record_btrace_set_replay (tp, NULL);
2740 }
2741
2742 /* The to_goto_record method of target record-btrace. */
2743
2744 static void
2745 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2746 {
2747 struct thread_info *tp;
2748 struct btrace_insn_iterator it;
2749 unsigned int number;
2750 int found;
2751
2752 number = insn;
2753
2754 /* Check for wrap-around in the truncation from ULONGEST to unsigned int. */
2755 if (number != insn)
2756 error (_("Instruction number out of range."));
2757
2758 tp = require_btrace_thread ();
2759
2760 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2761 if (found == 0)
2762 error (_("No such instruction."));
2763
2764 record_btrace_set_replay (tp, &it);
2765 }
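
/* Illustration: "record goto 42" ends up here with INSN == 42 and moves
   the replay position to the instruction numbered 42 in the output of
   "record instruction-history". */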
2766
2767 /* The to_record_stop_replaying method of target record-btrace. */
2768
2769 static void
2770 record_btrace_stop_replaying_all (struct target_ops *self)
2771 {
2772 struct thread_info *tp;
2773
2774 ALL_NON_EXITED_THREADS (tp)
2775 record_btrace_stop_replaying (tp);
2776 }
2777
2778 /* The to_execution_direction target method. */
2779
2780 static enum exec_direction_kind
2781 record_btrace_execution_direction (struct target_ops *self)
2782 {
2783 return record_btrace_resume_exec_dir;
2784 }
2785
2786 /* The to_prepare_to_generate_core target method. */
2787
2788 static void
2789 record_btrace_prepare_to_generate_core (struct target_ops *self)
2790 {
2791 record_btrace_generating_corefile = 1;
2792 }
2793
2794 /* The to_done_generating_core target method. */
2795
2796 static void
2797 record_btrace_done_generating_core (struct target_ops *self)
2798 {
2799 record_btrace_generating_corefile = 0;
2800 }
2801
2802 /* Initialize the record-btrace target ops. */
2803
2804 static void
2805 init_record_btrace_ops (void)
2806 {
2807 struct target_ops *ops;
2808
2809 ops = &record_btrace_ops;
2810 ops->to_shortname = "record-btrace";
2811 ops->to_longname = "Branch tracing target";
2812 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2813 ops->to_open = record_btrace_open;
2814 ops->to_close = record_btrace_close;
2815 ops->to_async = record_btrace_async;
2816 ops->to_detach = record_detach;
2817 ops->to_disconnect = record_disconnect;
2818 ops->to_mourn_inferior = record_mourn_inferior;
2819 ops->to_kill = record_kill;
2820 ops->to_stop_recording = record_btrace_stop_recording;
2821 ops->to_info_record = record_btrace_info;
2822 ops->to_insn_history = record_btrace_insn_history;
2823 ops->to_insn_history_from = record_btrace_insn_history_from;
2824 ops->to_insn_history_range = record_btrace_insn_history_range;
2825 ops->to_call_history = record_btrace_call_history;
2826 ops->to_call_history_from = record_btrace_call_history_from;
2827 ops->to_call_history_range = record_btrace_call_history_range;
2828 ops->to_record_is_replaying = record_btrace_is_replaying;
2829 ops->to_record_will_replay = record_btrace_will_replay;
2830 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2831 ops->to_xfer_partial = record_btrace_xfer_partial;
2832 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2833 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2834 ops->to_fetch_registers = record_btrace_fetch_registers;
2835 ops->to_store_registers = record_btrace_store_registers;
2836 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2837 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2838 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2839 ops->to_resume = record_btrace_resume;
2840 ops->to_wait = record_btrace_wait;
2841 ops->to_stop = record_btrace_stop;
2842 ops->to_update_thread_list = record_btrace_update_thread_list;
2843 ops->to_thread_alive = record_btrace_thread_alive;
2844 ops->to_goto_record_begin = record_btrace_goto_begin;
2845 ops->to_goto_record_end = record_btrace_goto_end;
2846 ops->to_goto_record = record_btrace_goto;
2847 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2848 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2849 ops->to_supports_stopped_by_sw_breakpoint
2850 = record_btrace_supports_stopped_by_sw_breakpoint;
2851 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2852 ops->to_supports_stopped_by_hw_breakpoint
2853 = record_btrace_supports_stopped_by_hw_breakpoint;
2854 ops->to_execution_direction = record_btrace_execution_direction;
2855 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2856 ops->to_done_generating_core = record_btrace_done_generating_core;
2857 ops->to_stratum = record_stratum;
2858 ops->to_magic = OPS_MAGIC;
2859 }
2860
2861 /* Start recording in BTS format. */
2862
2863 static void
2864 cmd_record_btrace_bts_start (char *args, int from_tty)
2865 {
2866 if (args != NULL && *args != 0)
2867 error (_("Invalid argument."));
2868
2869 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2870
2871 TRY
2872 {
2873 execute_command ("target record-btrace", from_tty);
2874 }
2875 CATCH (exception, RETURN_MASK_ALL)
2876 {
2877 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2878 throw_exception (exception);
2879 }
2880 END_CATCH
2881 }
2882
2883 /* Start recording in Intel(R) Processor Trace format. */
2884
2885 static void
2886 cmd_record_btrace_pt_start (char *args, int from_tty)
2887 {
2888 if (args != NULL && *args != 0)
2889 error (_("Invalid argument."));
2890
2891 record_btrace_conf.format = BTRACE_FORMAT_PT;
2892
2893 TRY
2894 {
2895 execute_command ("target record-btrace", from_tty);
2896 }
2897 CATCH (exception, RETURN_MASK_ALL)
2898 {
2899 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2900 throw_exception (exception);
2901 }
2902 END_CATCH
2903 }
2904
2905 /* The "record btrace" command. Try Intel(R) Processor Trace first, then BTS. */
2906
2907 static void
2908 cmd_record_btrace_start (char *args, int from_tty)
2909 {
2910 if (args != NULL && *args != 0)
2911 error (_("Invalid argument."));
2912
2913 record_btrace_conf.format = BTRACE_FORMAT_PT;
2914
2915 TRY
2916 {
2917 execute_command ("target record-btrace", from_tty);
2918 }
2919 CATCH (exception, RETURN_MASK_ALL)
2920 {
2921 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2922
2923 TRY
2924 {
2925 execute_command ("target record-btrace", from_tty);
2926 }
2927 CATCH (exception, RETURN_MASK_ALL)
2928 {
2929 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2930 throw_exception (exception);
2931 }
2932 END_CATCH
2933 }
2934 END_CATCH
2935 }
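
/* To illustrate the fallback above: "record btrace" first attempts
   Intel(R) Processor Trace format and silently falls back to BTS format;
   "record btrace pt" and "record btrace bts" request one specific format
   and fail if it is unavailable. */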
2936
2937 /* The "set record btrace" command. */
2938
2939 static void
2940 cmd_set_record_btrace (char *args, int from_tty)
2941 {
2942 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2943 }
2944
2945 /* The "show record btrace" command. */
2946
2947 static void
2948 cmd_show_record_btrace (char *args, int from_tty)
2949 {
2950 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2951 }
2952
2953 /* The "show record btrace replay-memory-access" command. */
2954
2955 static void
2956 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2957 struct cmd_list_element *c, const char *value)
2958 {
2959 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2960 replay_memory_access);
2961 }
2962
2963 /* The "set record btrace bts" command. */
2964
2965 static void
2966 cmd_set_record_btrace_bts (char *args, int from_tty)
2967 {
2968 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2969 "by an appropriate subcommand.\n"));
2970 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2971 all_commands, gdb_stdout);
2972 }
2973
2974 /* The "show record btrace bts" command. */
2975
2976 static void
2977 cmd_show_record_btrace_bts (char *args, int from_tty)
2978 {
2979 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2980 }
2981
2982 /* The "set record btrace pt" command. */
2983
2984 static void
2985 cmd_set_record_btrace_pt (char *args, int from_tty)
2986 {
2987 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2988 "by an appropriate subcommand.\n"));
2989 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2990 all_commands, gdb_stdout);
2991 }
2992
2993 /* The "show record btrace pt" command. */
2994
2995 static void
2996 cmd_show_record_btrace_pt (char *args, int from_tty)
2997 {
2998 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
2999 }
3000
3001 /* The "record bts buffer-size" show value function. */
3002
3003 static void
3004 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3005 struct cmd_list_element *c,
3006 const char *value)
3007 {
3008 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3009 value);
3010 }
3011
3012 /* The "record pt buffer-size" show value function. */
3013
3014 static void
3015 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3016 struct cmd_list_element *c,
3017 const char *value)
3018 {
3019 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3020 value);
3021 }
3022
3023 void _initialize_record_btrace (void);
3024
3025 /* Initialize btrace commands. */
3026
3027 void
3028 _initialize_record_btrace (void)
3029 {
3030 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3031 _("Start branch trace recording."), &record_btrace_cmdlist,
3032 "record btrace ", 0, &record_cmdlist);
3033 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3034
3035 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3036 _("\
3037 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3038 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3039 This format may not be available on all processors."),
3040 &record_btrace_cmdlist);
3041 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3042
3043 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3044 _("\
3045 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
3046 This format may not be available on all processors."),
3047 &record_btrace_cmdlist);
3048 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3049
3050 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3051 _("Set record options"), &set_record_btrace_cmdlist,
3052 "set record btrace ", 0, &set_record_cmdlist);
3053
3054 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3055 _("Show record options"), &show_record_btrace_cmdlist,
3056 "show record btrace ", 0, &show_record_cmdlist);
3057
3058 add_setshow_enum_cmd ("replay-memory-access", no_class,
3059 replay_memory_access_types, &replay_memory_access, _("\
3060 Set what memory accesses are allowed during replay."), _("\
3061 Show what memory accesses are allowed during replay."),
3062 _("Default is READ-ONLY.\n\n\
3063 The btrace record target does not trace data.\n\
3064 The memory therefore corresponds to the live target and not \
3065 to the current replay position.\n\n\
3066 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3067 When READ-WRITE, allow accesses to read-only and read-write memory during \
3068 replay."),
3069 NULL, cmd_show_replay_memory_access,
3070 &set_record_btrace_cmdlist,
3071 &show_record_btrace_cmdlist);
3072
3073 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3074 _("Set record btrace bts options"),
3075 &set_record_btrace_bts_cmdlist,
3076 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3077
3078 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3079 _("Show record btrace bts options"),
3080 &show_record_btrace_bts_cmdlist,
3081 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3082
3083 add_setshow_uinteger_cmd ("buffer-size", no_class,
3084 &record_btrace_conf.bts.size,
3085 _("Set the record/replay bts buffer size."),
3086 _("Show the record/replay bts buffer size."), _("\
3087 When starting recording, request a trace buffer of this size. \
3088 The actual buffer size may differ from the requested size. \
3089 Use \"info record\" to see the actual buffer size.\n\n\
3090 Bigger buffers allow longer recording but also take more time to process \
3091 the recorded execution trace.\n\n\
3092 The trace buffer size may not be changed while recording."), NULL,
3093 show_record_bts_buffer_size_value,
3094 &set_record_btrace_bts_cmdlist,
3095 &show_record_btrace_bts_cmdlist);
3096
3097 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3098 _("Set record btrace pt options"),
3099 &set_record_btrace_pt_cmdlist,
3100 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3101
3102 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3103 _("Show record btrace pt options"),
3104 &show_record_btrace_pt_cmdlist,
3105 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3106
3107 add_setshow_uinteger_cmd ("buffer-size", no_class,
3108 &record_btrace_conf.pt.size,
3109 _("Set the record/replay pt buffer size."),
3110 _("Show the record/replay pt buffer size."), _("\
3111 Bigger buffers allow longer recording but also take more time to process \
3112 the recorded execution trace.\n\
3113 The actual buffer size may differ from the requested size. Use \"info record\" \
3114 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3115 &set_record_btrace_pt_cmdlist,
3116 &show_record_btrace_pt_cmdlist);
3117
3118 init_record_btrace_ops ();
3119 add_target (&record_btrace_ops);
3120
3121 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3122 xcalloc, xfree);
3123
3124 record_btrace_conf.bts.size = 64 * 1024;
3125 record_btrace_conf.pt.size = 16 * 1024;
3126 }
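
/* Example session (illustrative):

     (gdb) set record btrace bts buffer-size 131072
     (gdb) set record btrace replay-memory-access read-write
     (gdb) record btrace bts
     (gdb) info record

   The buffer sizes configured above are only requests; the size actually
   granted is reported by "info record". */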