1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2016 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40 #include "vec.h"
41
42 /* The target_ops of record-btrace. */
43 static struct target_ops record_btrace_ops;
44
45 /* A new thread observer enabling branch tracing for the new thread. */
46 static struct observer *record_btrace_thread_observer;
47
48 /* Memory access types used in set/show record btrace replay-memory-access. */
49 static const char replay_memory_access_read_only[] = "read-only";
50 static const char replay_memory_access_read_write[] = "read-write";
51 static const char *const replay_memory_access_types[] =
52 {
53 replay_memory_access_read_only,
54 replay_memory_access_read_write,
55 NULL
56 };
57
58 /* The currently allowed replay memory access type. */
59 static const char *replay_memory_access = replay_memory_access_read_only;
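
/* For example (illustrative):

     (gdb) set record btrace replay-memory-access read-write

   The default is "read-only"; record_btrace_xfer_partial below then
   rejects writes and reads of non-read-only memory while replaying.  */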
60
61 /* Command lists for "set/show record btrace". */
62 static struct cmd_list_element *set_record_btrace_cmdlist;
63 static struct cmd_list_element *show_record_btrace_cmdlist;
64
65 /* The execution direction of the last resume we got. See record-full.c. */
66 static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
67
68 /* The async event handler for reverse/replay execution. */
69 static struct async_event_handler *record_btrace_async_inferior_event_handler;
70
71 /* A flag indicating that we are currently generating a core file. */
72 static int record_btrace_generating_corefile;
73
74 /* The current branch trace configuration. */
75 static struct btrace_config record_btrace_conf;
76
77 /* Command list for "record btrace". */
78 static struct cmd_list_element *record_btrace_cmdlist;
79
80 /* Command lists for "set/show record btrace bts". */
81 static struct cmd_list_element *set_record_btrace_bts_cmdlist;
82 static struct cmd_list_element *show_record_btrace_bts_cmdlist;
83
84 /* Command lists for "set/show record btrace pt". */
85 static struct cmd_list_element *set_record_btrace_pt_cmdlist;
86 static struct cmd_list_element *show_record_btrace_pt_cmdlist;
87
88 /* Print a record-btrace debug message. Use do ... while (0) to avoid
89 ambiguities when used in if statements. */
90
91 #define DEBUG(msg, args...) \
92 do \
93 { \
94 if (record_debug != 0) \
95 fprintf_unfiltered (gdb_stdlog, \
96 "[record-btrace] " msg "\n", ##args); \
97 } \
98 while (0)
99
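/* For example (illustrative), with "set debug record 1" a call such as

     DEBUG ("resume %s", target_pid_to_str (ptid));

   prints "[record-btrace] resume ..." to gdb_stdlog.  The do/while (0)
   wrapper makes the macro act as a single statement, so it can safely be
   used as the body of an unbraced if/else.  */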
100
101 /* Update the branch trace for the current thread and return a pointer to its
102 thread_info.
103
104 Throws an error if there is no thread or no trace. This function never
105 returns NULL. */
106
107 static struct thread_info *
108 require_btrace_thread (void)
109 {
110 struct thread_info *tp;
111
112 DEBUG ("require");
113
114 tp = find_thread_ptid (inferior_ptid);
115 if (tp == NULL)
116 error (_("No thread."));
117
118 btrace_fetch (tp);
119
120 if (btrace_is_empty (tp))
121 error (_("No trace."));
122
123 return tp;
124 }
125
126 /* Update the branch trace for the current thread and return a pointer to its
127 branch trace information struct.
128
129 Throws an error if there is no thread or no trace. This function never
130 returns NULL. */
131
132 static struct btrace_thread_info *
133 require_btrace (void)
134 {
135 struct thread_info *tp;
136
137 tp = require_btrace_thread ();
138
139 return &tp->btrace;
140 }
141
142 /* Enable branch tracing for one thread. Warn on errors. */
143
144 static void
145 record_btrace_enable_warn (struct thread_info *tp)
146 {
147 TRY
148 {
149 btrace_enable (tp, &record_btrace_conf);
150 }
151 CATCH (error, RETURN_MASK_ERROR)
152 {
153 warning ("%s", error.message);
154 }
155 END_CATCH
156 }
157
158 /* Callback function to disable branch tracing for one thread. */
159
160 static void
161 record_btrace_disable_callback (void *arg)
162 {
163 struct thread_info *tp = (struct thread_info *) arg;
164
165 btrace_disable (tp);
166 }
167
168 /* Enable automatic tracing of new threads. */
169
170 static void
171 record_btrace_auto_enable (void)
172 {
173 DEBUG ("attach thread observer");
174
175 record_btrace_thread_observer
176 = observer_attach_new_thread (record_btrace_enable_warn);
177 }
178
179 /* Disable automatic tracing of new threads. */
180
181 static void
182 record_btrace_auto_disable (void)
183 {
184 /* The observer may already have been detached. */
185 if (record_btrace_thread_observer == NULL)
186 return;
187
188 DEBUG ("detach thread observer");
189
190 observer_detach_new_thread (record_btrace_thread_observer);
191 record_btrace_thread_observer = NULL;
192 }
193
194 /* The record-btrace async event handler function. */
195
196 static void
197 record_btrace_handle_async_inferior_event (gdb_client_data data)
198 {
199 inferior_event_handler (INF_REG_EVENT, NULL);
200 }
201
202 /* The to_open method of target record-btrace. */
203
204 static void
205 record_btrace_open (const char *args, int from_tty)
206 {
207 struct cleanup *disable_chain;
208 struct thread_info *tp;
209
210 DEBUG ("open");
211
212 record_preopen ();
213
214 if (!target_has_execution)
215 error (_("The program is not being run."));
216
217 gdb_assert (record_btrace_thread_observer == NULL);
218
219 disable_chain = make_cleanup (null_cleanup, NULL);
220 ALL_NON_EXITED_THREADS (tp)
221 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
222 {
223 btrace_enable (tp, &record_btrace_conf);
224
225 make_cleanup (record_btrace_disable_callback, tp);
226 }
227
228 record_btrace_auto_enable ();
229
230 push_target (&record_btrace_ops);
231
232 record_btrace_async_inferior_event_handler
233 = create_async_event_handler (record_btrace_handle_async_inferior_event,
234 NULL);
235 record_btrace_generating_corefile = 0;
236
237 observer_notify_record_changed (current_inferior (), 1);
238
239 discard_cleanups (disable_chain);
240 }
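
/* Typical usage (illustrative):

     (gdb) start
     (gdb) record btrace

   ARGS, if given, is interpreted as a thread-number list, so recording
   can be restricted to a subset of the non-exited threads.  */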
241
242 /* The to_stop_recording method of target record-btrace. */
243
244 static void
245 record_btrace_stop_recording (struct target_ops *self)
246 {
247 struct thread_info *tp;
248
249 DEBUG ("stop recording");
250
251 record_btrace_auto_disable ();
252
253 ALL_NON_EXITED_THREADS (tp)
254 if (tp->btrace.target != NULL)
255 btrace_disable (tp);
256 }
257
258 /* The to_close method of target record-btrace. */
259
260 static void
261 record_btrace_close (struct target_ops *self)
262 {
263 struct thread_info *tp;
264
265 if (record_btrace_async_inferior_event_handler != NULL)
266 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
267
268 /* Make sure automatic recording gets disabled even if we did not stop
269 recording before closing the record-btrace target. */
270 record_btrace_auto_disable ();
271
272 /* We should have already stopped recording.
273 Tear down btrace in case we have not. */
274 ALL_NON_EXITED_THREADS (tp)
275 btrace_teardown (tp);
276 }
277
278 /* The to_async method of target record-btrace. */
279
280 static void
281 record_btrace_async (struct target_ops *ops, int enable)
282 {
283 if (enable)
284 mark_async_event_handler (record_btrace_async_inferior_event_handler);
285 else
286 clear_async_event_handler (record_btrace_async_inferior_event_handler);
287
288 ops->beneath->to_async (ops->beneath, enable);
289 }
290
291 /* Scale down *SIZE by the largest binary unit that divides it exactly and return the matching human-readable suffix ("GB", "MB", "kB", or ""). */
292
293 static const char *
294 record_btrace_adjust_size (unsigned int *size)
295 {
296 unsigned int sz;
297
298 sz = *size;
299
300 if ((sz & ((1u << 30) - 1)) == 0)
301 {
302 *size = sz >> 30;
303 return "GB";
304 }
305 else if ((sz & ((1u << 20) - 1)) == 0)
306 {
307 *size = sz >> 20;
308 return "MB";
309 }
310 else if ((sz & ((1u << 10) - 1)) == 0)
311 {
312 *size = sz >> 10;
313 return "kB";
314 }
315 else
316 return "";
317 }
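
/* For instance, 4 << 20 (4194304) is scaled to 4 with suffix "MB", while
   4194305 is not an exact multiple of 1kB and is returned unchanged with
   an empty suffix; only exact multiples are scaled, nothing is rounded.  */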
318
319 /* Print a BTS configuration. */
320
321 static void
322 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
323 {
324 const char *suffix;
325 unsigned int size;
326
327 size = conf->size;
328 if (size > 0)
329 {
330 suffix = record_btrace_adjust_size (&size);
331 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
332 }
333 }
334
335 /* Print an Intel(R) Processor Trace configuration. */
336
337 static void
338 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
339 {
340 const char *suffix;
341 unsigned int size;
342
343 size = conf->size;
344 if (size > 0)
345 {
346 suffix = record_btrace_adjust_size (&size);
347 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
348 }
349 }
350
351 /* Print a branch tracing configuration. */
352
353 static void
354 record_btrace_print_conf (const struct btrace_config *conf)
355 {
356 printf_unfiltered (_("Recording format: %s.\n"),
357 btrace_format_string (conf->format));
358
359 switch (conf->format)
360 {
361 case BTRACE_FORMAT_NONE:
362 return;
363
364 case BTRACE_FORMAT_BTS:
365 record_btrace_print_bts_conf (&conf->bts);
366 return;
367
368 case BTRACE_FORMAT_PT:
369 record_btrace_print_pt_conf (&conf->pt);
370 return;
371 }
372
373 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
374 }
375
376 /* The to_info_record method of target record-btrace. */
377
378 static void
379 record_btrace_info (struct target_ops *self)
380 {
381 struct btrace_thread_info *btinfo;
382 const struct btrace_config *conf;
383 struct thread_info *tp;
384 unsigned int insns, calls, gaps;
385
386 DEBUG ("info");
387
388 tp = find_thread_ptid (inferior_ptid);
389 if (tp == NULL)
390 error (_("No thread."));
391
392 btinfo = &tp->btrace;
393
394 conf = btrace_conf (btinfo);
395 if (conf != NULL)
396 record_btrace_print_conf (conf);
397
398 btrace_fetch (tp);
399
400 insns = 0;
401 calls = 0;
402 gaps = 0;
403
404 if (!btrace_is_empty (tp))
405 {
406 struct btrace_call_iterator call;
407 struct btrace_insn_iterator insn;
408
409 btrace_call_end (&call, btinfo);
410 btrace_call_prev (&call, 1);
411 calls = btrace_call_number (&call);
412
413 btrace_insn_end (&insn, btinfo);
414
415 insns = btrace_insn_number (&insn);
416 if (insns != 0)
417 {
418 /* The last instruction does not really belong to the trace. */
419 insns -= 1;
420 }
421 else
422 {
423 unsigned int steps;
424
425 /* Skip gaps at the end. */
426 do
427 {
428 steps = btrace_insn_prev (&insn, 1);
429 if (steps == 0)
430 break;
431
432 insns = btrace_insn_number (&insn);
433 }
434 while (insns == 0);
435 }
436
437 gaps = btinfo->ngaps;
438 }
439
440 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
441 "for thread %d (%s).\n"), insns, calls, gaps,
442 tp->num, target_pid_to_str (tp->ptid));
443
444 if (btrace_is_replaying (tp))
445 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
446 btrace_insn_number (btinfo->replay));
447 }
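
/* Illustrative output (values made up; the format line depends on the
   recording format in use):

     Recording format: ....
     Buffer size: 64kB.
     Recorded 4873 instructions in 213 functions (0 gaps) for thread 1 (...).
     Replay in progress.  At instruction 23.

   The last line only appears while replaying.  */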
448
449 /* Print a decode error. */
450
451 static void
452 btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
453 enum btrace_format format)
454 {
455 const char *errstr;
456 int is_error;
457
458 errstr = _("unknown");
459 is_error = 1;
460
461 switch (format)
462 {
463 default:
464 break;
465
466 case BTRACE_FORMAT_BTS:
467 switch (errcode)
468 {
469 default:
470 break;
471
472 case BDE_BTS_OVERFLOW:
473 errstr = _("instruction overflow");
474 break;
475
476 case BDE_BTS_INSN_SIZE:
477 errstr = _("unknown instruction");
478 break;
479 }
480 break;
481
482 #if defined (HAVE_LIBIPT)
483 case BTRACE_FORMAT_PT:
484 switch (errcode)
485 {
486 case BDE_PT_USER_QUIT:
487 is_error = 0;
488 errstr = _("trace decode cancelled");
489 break;
490
491 case BDE_PT_DISABLED:
492 is_error = 0;
493 errstr = _("disabled");
494 break;
495
496 case BDE_PT_OVERFLOW:
497 is_error = 0;
498 errstr = _("overflow");
499 break;
500
501 default:
502 if (errcode < 0)
503 errstr = pt_errstr (pt_errcode (errcode));
504 break;
505 }
506 break;
507 #endif /* defined (HAVE_LIBIPT) */
508 }
509
510 ui_out_text (uiout, _("["));
511 if (is_error)
512 {
513 ui_out_text (uiout, _("decode error ("));
514 ui_out_field_int (uiout, "errcode", errcode);
515 ui_out_text (uiout, _("): "));
516 }
517 ui_out_text (uiout, errstr);
518 ui_out_text (uiout, _("]\n"));
519 }
520
521 /* Print an unsigned int. */
522
523 static void
524 ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
525 {
526 ui_out_field_fmt (uiout, fld, "%u", val);
527 }
528
529 /* A range of source lines. */
530
531 struct btrace_line_range
532 {
533 /* The symtab this line is from. */
534 struct symtab *symtab;
535
536 /* The first line (inclusive). */
537 int begin;
538
539 /* The last line (exclusive). */
540 int end;
541 };
542
543 /* Construct a line range. */
544
545 static struct btrace_line_range
546 btrace_mk_line_range (struct symtab *symtab, int begin, int end)
547 {
548 struct btrace_line_range range;
549
550 range.symtab = symtab;
551 range.begin = begin;
552 range.end = end;
553
554 return range;
555 }
556
557 /* Add a line to a line range. */
558
559 static struct btrace_line_range
560 btrace_line_range_add (struct btrace_line_range range, int line)
561 {
562 if (range.end <= range.begin)
563 {
564 /* This is the first entry. */
565 range.begin = line;
566 range.end = line + 1;
567 }
568 else if (line < range.begin)
569 range.begin = line;
570 else if (range.end <= line)
571 range.end = line + 1;
572
573 return range;
574 }
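
/* For example (illustrative): starting from the empty range [0; 0) and
   adding lines 42, 40, and 45 in that order yields [40; 46); BEGIN tracks
   the lowest line seen and END one past the highest.  */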
575
576 /* Return non-zero if RANGE is empty, zero otherwise. */
577
578 static int
579 btrace_line_range_is_empty (struct btrace_line_range range)
580 {
581 return range.end <= range.begin;
582 }
583
584 /* Return non-zero if LHS contains RHS, zero otherwise. */
585
586 static int
587 btrace_line_range_contains_range (struct btrace_line_range lhs,
588 struct btrace_line_range rhs)
589 {
590 return ((lhs.symtab == rhs.symtab)
591 && (lhs.begin <= rhs.begin)
592 && (rhs.end <= lhs.end));
593 }
594
595 /* Find the line range associated with PC. */
596
597 static struct btrace_line_range
598 btrace_find_line_range (CORE_ADDR pc)
599 {
600 struct btrace_line_range range;
601 struct linetable_entry *lines;
602 struct linetable *ltable;
603 struct symtab *symtab;
604 int nlines, i;
605
606 symtab = find_pc_line_symtab (pc);
607 if (symtab == NULL)
608 return btrace_mk_line_range (NULL, 0, 0);
609
610 ltable = SYMTAB_LINETABLE (symtab);
611 if (ltable == NULL)
612 return btrace_mk_line_range (symtab, 0, 0);
613
614 nlines = ltable->nitems;
615 lines = ltable->item;
616 if (nlines <= 0)
617 return btrace_mk_line_range (symtab, 0, 0);
618
619 range = btrace_mk_line_range (symtab, 0, 0);
620 for (i = 0; i < nlines - 1; i++)
621 {
622 if ((lines[i].pc == pc) && (lines[i].line != 0))
623 range = btrace_line_range_add (range, lines[i].line);
624 }
625
626 return range;
627 }
628
629 /* Print source lines in LINES to UIOUT.
630
631 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
632 instructions corresponding to that source line. When printing a new source
633 line, we do the cleanups for the open chain and open a new cleanup chain for
634 the new source line. If the source line range in LINES is not empty, this
635 function will leave the cleanup chain for the last printed source line open
636 so instructions can be added to it. */
637
638 static void
639 btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
640 struct cleanup **ui_item_chain, int flags)
641 {
642 print_source_lines_flags psl_flags;
643 int line;
644
645 psl_flags = 0;
646 if (flags & DISASSEMBLY_FILENAME)
647 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
648
649 for (line = lines.begin; line < lines.end; ++line)
650 {
651 if (*ui_item_chain != NULL)
652 do_cleanups (*ui_item_chain);
653
654 *ui_item_chain
655 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
656
657 print_source_lines (lines.symtab, line, line + 1, psl_flags);
658
659 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
660 }
661 }
662
663 /* Disassemble a section of the recorded instruction trace. */
664
665 static void
666 btrace_insn_history (struct ui_out *uiout,
667 const struct btrace_thread_info *btinfo,
668 const struct btrace_insn_iterator *begin,
669 const struct btrace_insn_iterator *end, int flags)
670 {
671 struct ui_file *stb;
672 struct cleanup *cleanups, *ui_item_chain;
673 struct disassemble_info di;
674 struct gdbarch *gdbarch;
675 struct btrace_insn_iterator it;
676 struct btrace_line_range last_lines;
677
678 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
679 btrace_insn_number (end));
680
681 flags |= DISASSEMBLY_SPECULATIVE;
682
683 gdbarch = target_gdbarch ();
684 stb = mem_fileopen ();
685 cleanups = make_cleanup_ui_file_delete (stb);
686 di = gdb_disassemble_info (gdbarch, stb);
687 last_lines = btrace_mk_line_range (NULL, 0, 0);
688
689 make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
690
691 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
692 instructions corresponding to that line. */
693 ui_item_chain = NULL;
694
695 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
696 {
697 const struct btrace_insn *insn;
698
699 insn = btrace_insn_get (&it);
700
701 /* A NULL instruction indicates a gap in the trace. */
702 if (insn == NULL)
703 {
704 const struct btrace_config *conf;
705
706 conf = btrace_conf (btinfo);
707
708 /* We have trace, so we must have a configuration. */
709 gdb_assert (conf != NULL);
710
711 btrace_ui_out_decode_error (uiout, it.function->errcode,
712 conf->format);
713 }
714 else
715 {
716 struct disasm_insn dinsn;
717
718 if ((flags & DISASSEMBLY_SOURCE) != 0)
719 {
720 struct btrace_line_range lines;
721
722 lines = btrace_find_line_range (insn->pc);
723 if (!btrace_line_range_is_empty (lines)
724 && !btrace_line_range_contains_range (last_lines, lines))
725 {
726 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
727 last_lines = lines;
728 }
729 else if (ui_item_chain == NULL)
730 {
731 ui_item_chain
732 = make_cleanup_ui_out_tuple_begin_end (uiout,
733 "src_and_asm_line");
734 /* No source information. */
735 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
736 }
737
738 gdb_assert (ui_item_chain != NULL);
739 }
740
741 memset (&dinsn, 0, sizeof (dinsn));
742 dinsn.number = btrace_insn_number (&it);
743 dinsn.addr = insn->pc;
744
745 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
746 dinsn.is_speculative = 1;
747
748 gdb_pretty_print_insn (gdbarch, uiout, &di, &dinsn, flags, stb);
749 }
750 }
751
752 do_cleanups (cleanups);
753 }
754
755 /* The to_insn_history method of target record-btrace. */
756
757 static void
758 record_btrace_insn_history (struct target_ops *self, int size, int flags)
759 {
760 struct btrace_thread_info *btinfo;
761 struct btrace_insn_history *history;
762 struct btrace_insn_iterator begin, end;
763 struct cleanup *uiout_cleanup;
764 struct ui_out *uiout;
765 unsigned int context, covered;
766
767 uiout = current_uiout;
768 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
769 "insn history");
770 context = abs (size);
771 if (context == 0)
772 error (_("Bad record instruction-history-size."));
773
774 btinfo = require_btrace ();
775 history = btinfo->insn_history;
776 if (history == NULL)
777 {
778 struct btrace_insn_iterator *replay;
779
780 DEBUG ("insn-history (0x%x): %d", flags, size);
781
782 /* If we're replaying, we start at the replay position. Otherwise, we
783 start at the tail of the trace. */
784 replay = btinfo->replay;
785 if (replay != NULL)
786 begin = *replay;
787 else
788 btrace_insn_end (&begin, btinfo);
789
790 /* We start from here and expand in the requested direction. Then we
791 expand in the other direction, as well, to fill up any remaining
792 context. */
793 end = begin;
794 if (size < 0)
795 {
796 /* We want the current position covered, as well. */
797 covered = btrace_insn_next (&end, 1);
798 covered += btrace_insn_prev (&begin, context - covered);
799 covered += btrace_insn_next (&end, context - covered);
800 }
801 else
802 {
803 covered = btrace_insn_next (&end, context);
804 covered += btrace_insn_prev (&begin, context - covered);
805 }
806 }
807 else
808 {
809 begin = history->begin;
810 end = history->end;
811
812 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
813 btrace_insn_number (&begin), btrace_insn_number (&end));
814
815 if (size < 0)
816 {
817 end = begin;
818 covered = btrace_insn_prev (&begin, context);
819 }
820 else
821 {
822 begin = end;
823 covered = btrace_insn_next (&end, context);
824 }
825 }
826
827 if (covered > 0)
828 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
829 else
830 {
831 if (size < 0)
832 printf_unfiltered (_("At the start of the branch trace record.\n"));
833 else
834 printf_unfiltered (_("At the end of the branch trace record.\n"));
835 }
836
837 btrace_set_insn_history (btinfo, &begin, &end);
838 do_cleanups (uiout_cleanup);
839 }
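
/* The expansion above fills the requested context from both sides.  For
   example, with a context of 10 but only 3 instructions available in the
   requested direction, the remaining 7 are taken from the opposite
   direction, so the printed window covers the full context whenever
   enough trace exists.  */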
840
841 /* The to_insn_history_range method of target record-btrace. */
842
843 static void
844 record_btrace_insn_history_range (struct target_ops *self,
845 ULONGEST from, ULONGEST to, int flags)
846 {
847 struct btrace_thread_info *btinfo;
848 struct btrace_insn_history *history;
849 struct btrace_insn_iterator begin, end;
850 struct cleanup *uiout_cleanup;
851 struct ui_out *uiout;
852 unsigned int low, high;
853 int found;
854
855 uiout = current_uiout;
856 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
857 "insn history");
858 low = from;
859 high = to;
860
861 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
862
863 /* Check for wrap-arounds. */
864 if (low != from || high != to)
865 error (_("Bad range."));
866
867 if (high < low)
868 error (_("Bad range."));
869
870 btinfo = require_btrace ();
871
872 found = btrace_find_insn_by_number (&begin, btinfo, low);
873 if (found == 0)
874 error (_("Range out of bounds."));
875
876 found = btrace_find_insn_by_number (&end, btinfo, high);
877 if (found == 0)
878 {
879 /* Silently truncate the range. */
880 btrace_insn_end (&end, btinfo);
881 }
882 else
883 {
884 /* We want both begin and end to be inclusive. */
885 btrace_insn_next (&end, 1);
886 }
887
888 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
889 btrace_set_insn_history (btinfo, &begin, &end);
890
891 do_cleanups (uiout_cleanup);
892 }
893
894 /* The to_insn_history_from method of target record-btrace. */
895
896 static void
897 record_btrace_insn_history_from (struct target_ops *self,
898 ULONGEST from, int size, int flags)
899 {
900 ULONGEST begin, end, context;
901
902 context = abs (size);
903 if (context == 0)
904 error (_("Bad record instruction-history-size."));
905
906 if (size < 0)
907 {
908 end = from;
909
910 if (from < context)
911 begin = 0;
912 else
913 begin = from - context + 1;
914 }
915 else
916 {
917 begin = from;
918 end = from + context - 1;
919
920 /* Check for wrap-around. */
921 if (end < begin)
922 end = ULONGEST_MAX;
923 }
924
925 record_btrace_insn_history_range (self, begin, end, flags);
926 }
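
/* For example (illustrative), "record instruction-history 100" with a
   context of 10 requests the inclusive range [100; 109], while a negative
   size requests [91; 100]; FROM itself is covered in both cases.  */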
927
928 /* Print the instruction number range for a function call history line. */
929
930 static void
931 btrace_call_history_insn_range (struct ui_out *uiout,
932 const struct btrace_function *bfun)
933 {
934 unsigned int begin, end, size;
935
936 size = VEC_length (btrace_insn_s, bfun->insn);
937 gdb_assert (size > 0);
938
939 begin = bfun->insn_offset;
940 end = begin + size - 1;
941
942 ui_out_field_uint (uiout, "insn begin", begin);
943 ui_out_text (uiout, ",");
944 ui_out_field_uint (uiout, "insn end", end);
945 }
946
947 /* Compute the lowest and highest source line for the instructions in BFUN
948 and return them in PBEGIN and PEND.
949 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
950 result from inlining or macro expansion. */
951
952 static void
953 btrace_compute_src_line_range (const struct btrace_function *bfun,
954 int *pbegin, int *pend)
955 {
956 struct btrace_insn *insn;
957 struct symtab *symtab;
958 struct symbol *sym;
959 unsigned int idx;
960 int begin, end;
961
962 begin = INT_MAX;
963 end = INT_MIN;
964
965 sym = bfun->sym;
966 if (sym == NULL)
967 goto out;
968
969 symtab = symbol_symtab (sym);
970
971 for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
972 {
973 struct symtab_and_line sal;
974
975 sal = find_pc_line (insn->pc, 0);
976 if (sal.symtab != symtab || sal.line == 0)
977 continue;
978
979 begin = min (begin, sal.line);
980 end = max (end, sal.line);
981 }
982
983 out:
984 *pbegin = begin;
985 *pend = end;
986 }
987
988 /* Print the source line information for a function call history line. */
989
990 static void
991 btrace_call_history_src_line (struct ui_out *uiout,
992 const struct btrace_function *bfun)
993 {
994 struct symbol *sym;
995 int begin, end;
996
997 sym = bfun->sym;
998 if (sym == NULL)
999 return;
1000
1001 ui_out_field_string (uiout, "file",
1002 symtab_to_filename_for_display (symbol_symtab (sym)));
1003
1004 btrace_compute_src_line_range (bfun, &begin, &end);
1005 if (end < begin)
1006 return;
1007
1008 ui_out_text (uiout, ":");
1009 ui_out_field_int (uiout, "min line", begin);
1010
1011 if (end == begin)
1012 return;
1013
1014 ui_out_text (uiout, ",");
1015 ui_out_field_int (uiout, "max line", end);
1016 }
1017
1018 /* Get the name of a branch trace function. */
1019
1020 static const char *
1021 btrace_get_bfun_name (const struct btrace_function *bfun)
1022 {
1023 struct minimal_symbol *msym;
1024 struct symbol *sym;
1025
1026 if (bfun == NULL)
1027 return "??";
1028
1029 msym = bfun->msym;
1030 sym = bfun->sym;
1031
1032 if (sym != NULL)
1033 return SYMBOL_PRINT_NAME (sym);
1034 else if (msym != NULL)
1035 return MSYMBOL_PRINT_NAME (msym);
1036 else
1037 return "??";
1038 }
1039
1040 /* Print a section of the recorded function call trace. */
1041
1042 static void
1043 btrace_call_history (struct ui_out *uiout,
1044 const struct btrace_thread_info *btinfo,
1045 const struct btrace_call_iterator *begin,
1046 const struct btrace_call_iterator *end,
1047 int int_flags)
1048 {
1049 struct btrace_call_iterator it;
1050 record_print_flags flags = (enum record_print_flag) int_flags;
1051
1052 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
1053 btrace_call_number (end));
1054
1055 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
1056 {
1057 const struct btrace_function *bfun;
1058 struct minimal_symbol *msym;
1059 struct symbol *sym;
1060
1061 bfun = btrace_call_get (&it);
1062 sym = bfun->sym;
1063 msym = bfun->msym;
1064
1065 /* Print the function index. */
1066 ui_out_field_uint (uiout, "index", bfun->number);
1067 ui_out_text (uiout, "\t");
1068
1069 /* Indicate gaps in the trace. */
1070 if (bfun->errcode != 0)
1071 {
1072 const struct btrace_config *conf;
1073
1074 conf = btrace_conf (btinfo);
1075
1076 /* We have trace so we must have a configuration. */
1077 gdb_assert (conf != NULL);
1078
1079 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1080
1081 continue;
1082 }
1083
1084 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1085 {
1086 int level = bfun->level + btinfo->level, i;
1087
1088 for (i = 0; i < level; ++i)
1089 ui_out_text (uiout, " ");
1090 }
1091
1092 if (sym != NULL)
1093 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
1094 else if (msym != NULL)
1095 ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
1096 else if (!ui_out_is_mi_like_p (uiout))
1097 ui_out_field_string (uiout, "function", "??");
1098
1099 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
1100 {
1101 ui_out_text (uiout, _("\tinst "));
1102 btrace_call_history_insn_range (uiout, bfun);
1103 }
1104
1105 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
1106 {
1107 ui_out_text (uiout, _("\tat "));
1108 btrace_call_history_src_line (uiout, bfun);
1109 }
1110
1111 ui_out_text (uiout, "\n");
1112 }
1113 }
1114
1115 /* The to_call_history method of target record-btrace. */
1116
1117 static void
1118 record_btrace_call_history (struct target_ops *self, int size, int int_flags)
1119 {
1120 struct btrace_thread_info *btinfo;
1121 struct btrace_call_history *history;
1122 struct btrace_call_iterator begin, end;
1123 struct cleanup *uiout_cleanup;
1124 struct ui_out *uiout;
1125 unsigned int context, covered;
1126 record_print_flags flags = (enum record_print_flag) int_flags;
1127
1128 uiout = current_uiout;
1129 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1130 "insn history");
1131 context = abs (size);
1132 if (context == 0)
1133 error (_("Bad record function-call-history-size."));
1134
1135 btinfo = require_btrace ();
1136 history = btinfo->call_history;
1137 if (history == NULL)
1138 {
1139 struct btrace_insn_iterator *replay;
1140
1141 DEBUG ("call-history (0x%x): %d", int_flags, size);
1142
1143 /* If we're replaying, we start at the replay position. Otherwise, we
1144 start at the tail of the trace. */
1145 replay = btinfo->replay;
1146 if (replay != NULL)
1147 {
1148 begin.function = replay->function;
1149 begin.btinfo = btinfo;
1150 }
1151 else
1152 btrace_call_end (&begin, btinfo);
1153
1154 /* We start from here and expand in the requested direction. Then we
1155 expand in the other direction, as well, to fill up any remaining
1156 context. */
1157 end = begin;
1158 if (size < 0)
1159 {
1160 /* We want the current position covered, as well. */
1161 covered = btrace_call_next (&end, 1);
1162 covered += btrace_call_prev (&begin, context - covered);
1163 covered += btrace_call_next (&end, context - covered);
1164 }
1165 else
1166 {
1167 covered = btrace_call_next (&end, context);
1168 covered += btrace_call_prev (&begin, context - covered);
1169 }
1170 }
1171 else
1172 {
1173 begin = history->begin;
1174 end = history->end;
1175
1176 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
1177 btrace_call_number (&begin), btrace_call_number (&end));
1178
1179 if (size < 0)
1180 {
1181 end = begin;
1182 covered = btrace_call_prev (&begin, context);
1183 }
1184 else
1185 {
1186 begin = end;
1187 covered = btrace_call_next (&end, context);
1188 }
1189 }
1190
1191 if (covered > 0)
1192 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1193 else
1194 {
1195 if (size < 0)
1196 printf_unfiltered (_("At the start of the branch trace record.\n"));
1197 else
1198 printf_unfiltered (_("At the end of the branch trace record.\n"));
1199 }
1200
1201 btrace_set_call_history (btinfo, &begin, &end);
1202 do_cleanups (uiout_cleanup);
1203 }
1204
1205 /* The to_call_history_range method of target record-btrace. */
1206
1207 static void
1208 record_btrace_call_history_range (struct target_ops *self,
1209 ULONGEST from, ULONGEST to,
1210 int int_flags)
1211 {
1212 struct btrace_thread_info *btinfo;
1213 struct btrace_call_history *history;
1214 struct btrace_call_iterator begin, end;
1215 struct cleanup *uiout_cleanup;
1216 struct ui_out *uiout;
1217 unsigned int low, high;
1218 int found;
1219 record_print_flags flags = (enum record_print_flag) int_flags;
1220
1221 uiout = current_uiout;
1222 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
1223 "func history");
1224 low = from;
1225 high = to;
1226
1227 DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);
1228
1229 /* Check for wrap-arounds. */
1230 if (low != from || high != to)
1231 error (_("Bad range."));
1232
1233 if (high < low)
1234 error (_("Bad range."));
1235
1236 btinfo = require_btrace ();
1237
1238 found = btrace_find_call_by_number (&begin, btinfo, low);
1239 if (found == 0)
1240 error (_("Range out of bounds."));
1241
1242 found = btrace_find_call_by_number (&end, btinfo, high);
1243 if (found == 0)
1244 {
1245 /* Silently truncate the range. */
1246 btrace_call_end (&end, btinfo);
1247 }
1248 else
1249 {
1250 /* We want both begin and end to be inclusive. */
1251 btrace_call_next (&end, 1);
1252 }
1253
1254 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1255 btrace_set_call_history (btinfo, &begin, &end);
1256
1257 do_cleanups (uiout_cleanup);
1258 }
1259
1260 /* The to_call_history_from method of target record-btrace. */
1261
1262 static void
1263 record_btrace_call_history_from (struct target_ops *self,
1264 ULONGEST from, int size,
1265 int int_flags)
1266 {
1267 ULONGEST begin, end, context;
1268 record_print_flags flags = (enum record_print_flag) int_flags;
1269
1270 context = abs (size);
1271 if (context == 0)
1272 error (_("Bad record function-call-history-size."));
1273
1274 if (size < 0)
1275 {
1276 end = from;
1277
1278 if (from < context)
1279 begin = 0;
1280 else
1281 begin = from - context + 1;
1282 }
1283 else
1284 {
1285 begin = from;
1286 end = from + context - 1;
1287
1288 /* Check for wrap-around. */
1289 if (end < begin)
1290 end = ULONGEST_MAX;
1291 }
1292
1293 record_btrace_call_history_range (self, begin, end, flags);
1294 }
1295
1296 /* The to_record_is_replaying method of target record-btrace. */
1297
1298 static int
1299 record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1300 {
1301 struct thread_info *tp;
1302
1303 ALL_NON_EXITED_THREADS (tp)
1304 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1305 return 1;
1306
1307 return 0;
1308 }
1309
1310 /* The to_record_will_replay method of target record-btrace. */
1311
1312 static int
1313 record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1314 {
1315 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1316 }
1317
1318 /* The to_xfer_partial method of target record-btrace. */
1319
1320 static enum target_xfer_status
1321 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1322 const char *annex, gdb_byte *readbuf,
1323 const gdb_byte *writebuf, ULONGEST offset,
1324 ULONGEST len, ULONGEST *xfered_len)
1325 {
1326 struct target_ops *t;
1327
1328 /* Filter out requests that don't make sense during replay. */
1329 if (replay_memory_access == replay_memory_access_read_only
1330 && !record_btrace_generating_corefile
1331 && record_btrace_is_replaying (ops, inferior_ptid))
1332 {
1333 switch (object)
1334 {
1335 case TARGET_OBJECT_MEMORY:
1336 {
1337 struct target_section *section;
1338
1339 /* We do not allow writing memory in general. */
1340 if (writebuf != NULL)
1341 {
1342 *xfered_len = len;
1343 return TARGET_XFER_UNAVAILABLE;
1344 }
1345
1346 /* We allow reading readonly memory. */
1347 section = target_section_by_addr (ops, offset);
1348 if (section != NULL)
1349 {
1350 /* Check if the section we found is readonly. */
1351 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1352 section->the_bfd_section)
1353 & SEC_READONLY) != 0)
1354 {
1355 /* Truncate the request to fit into this section. */
1356 len = min (len, section->endaddr - offset);
1357 break;
1358 }
1359 }
1360
1361 *xfered_len = len;
1362 return TARGET_XFER_UNAVAILABLE;
1363 }
1364 }
1365 }
1366
1367 /* Forward the request. */
1368 ops = ops->beneath;
1369 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1370 offset, len, xfered_len);
1371 }
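
/* For example, while replaying with the default "read-only" setting,
   reading code from a SEC_READONLY section such as .text succeeds, while
   writing memory or reading the (writable) stack reports
   TARGET_XFER_UNAVAILABLE until replay-memory-access is set to
   "read-write".  */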
1372
1373 /* The to_insert_breakpoint method of target record-btrace. */
1374
1375 static int
1376 record_btrace_insert_breakpoint (struct target_ops *ops,
1377 struct gdbarch *gdbarch,
1378 struct bp_target_info *bp_tgt)
1379 {
1380 const char *old;
1381 int ret;
1382
1383 /* Inserting breakpoints requires accessing memory. Allow it for the
1384 duration of this function. */
1385 old = replay_memory_access;
1386 replay_memory_access = replay_memory_access_read_write;
1387
1388 ret = 0;
1389 TRY
1390 {
1391 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1392 }
1393 CATCH (except, RETURN_MASK_ALL)
1394 {
1395 replay_memory_access = old;
1396 throw_exception (except);
1397 }
1398 END_CATCH
1399 replay_memory_access = old;
1400
1401 return ret;
1402 }
1403
1404 /* The to_remove_breakpoint method of target record-btrace. */
1405
1406 static int
1407 record_btrace_remove_breakpoint (struct target_ops *ops,
1408 struct gdbarch *gdbarch,
1409 struct bp_target_info *bp_tgt)
1410 {
1411 const char *old;
1412 int ret;
1413
1414 /* Removing breakpoints requires accessing memory. Allow it for the
1415 duration of this function. */
1416 old = replay_memory_access;
1417 replay_memory_access = replay_memory_access_read_write;
1418
1419 ret = 0;
1420 TRY
1421 {
1422 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
1423 }
1424 CATCH (except, RETURN_MASK_ALL)
1425 {
1426 replay_memory_access = old;
1427 throw_exception (except);
1428 }
1429 END_CATCH
1430 replay_memory_access = old;
1431
1432 return ret;
1433 }
1434
1435 /* The to_fetch_registers method of target record-btrace. */
1436
1437 static void
1438 record_btrace_fetch_registers (struct target_ops *ops,
1439 struct regcache *regcache, int regno)
1440 {
1441 struct btrace_insn_iterator *replay;
1442 struct thread_info *tp;
1443
1444 tp = find_thread_ptid (inferior_ptid);
1445 gdb_assert (tp != NULL);
1446
1447 replay = tp->btrace.replay;
1448 if (replay != NULL && !record_btrace_generating_corefile)
1449 {
1450 const struct btrace_insn *insn;
1451 struct gdbarch *gdbarch;
1452 int pcreg;
1453
1454 gdbarch = get_regcache_arch (regcache);
1455 pcreg = gdbarch_pc_regnum (gdbarch);
1456 if (pcreg < 0)
1457 return;
1458
1459 /* We can only provide the PC register. */
1460 if (regno >= 0 && regno != pcreg)
1461 return;
1462
1463 insn = btrace_insn_get (replay);
1464 gdb_assert (insn != NULL);
1465
1466 regcache_raw_supply (regcache, regno, &insn->pc);
1467 }
1468 else
1469 {
1470 struct target_ops *t = ops->beneath;
1471
1472 t->to_fetch_registers (t, regcache, regno);
1473 }
1474 }
1475
1476 /* The to_store_registers method of target record-btrace. */
1477
1478 static void
1479 record_btrace_store_registers (struct target_ops *ops,
1480 struct regcache *regcache, int regno)
1481 {
1482 struct target_ops *t;
1483
1484 if (!record_btrace_generating_corefile
1485 && record_btrace_is_replaying (ops, inferior_ptid))
1486 error (_("Cannot write registers while replaying."));
1487
1488 gdb_assert (may_write_registers != 0);
1489
1490 t = ops->beneath;
1491 t->to_store_registers (t, regcache, regno);
1492 }
1493
1494 /* The to_prepare_to_store method of target record-btrace. */
1495
1496 static void
1497 record_btrace_prepare_to_store (struct target_ops *ops,
1498 struct regcache *regcache)
1499 {
1500 struct target_ops *t;
1501
1502 if (!record_btrace_generating_corefile
1503 && record_btrace_is_replaying (ops, inferior_ptid))
1504 return;
1505
1506 t = ops->beneath;
1507 t->to_prepare_to_store (t, regcache);
1508 }
1509
1510 /* The branch trace frame cache. */
1511
1512 struct btrace_frame_cache
1513 {
1514 /* The thread. */
1515 struct thread_info *tp;
1516
1517 /* The frame info. */
1518 struct frame_info *frame;
1519
1520 /* The branch trace function segment. */
1521 const struct btrace_function *bfun;
1522 };
1523
1524 /* A struct btrace_frame_cache hash table indexed by NEXT. */
1525
1526 static htab_t bfcache;
1527
1528 /* hash_f for htab_create_alloc of bfcache. */
1529
1530 static hashval_t
1531 bfcache_hash (const void *arg)
1532 {
1533 const struct btrace_frame_cache *cache
1534 = (const struct btrace_frame_cache *) arg;
1535
1536 return htab_hash_pointer (cache->frame);
1537 }
1538
1539 /* eq_f for htab_create_alloc of bfcache. */
1540
1541 static int
1542 bfcache_eq (const void *arg1, const void *arg2)
1543 {
1544 const struct btrace_frame_cache *cache1
1545 = (const struct btrace_frame_cache *) arg1;
1546 const struct btrace_frame_cache *cache2
1547 = (const struct btrace_frame_cache *) arg2;
1548
1549 return cache1->frame == cache2->frame;
1550 }
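
/* Both functions key the table on the frame_info pointer alone, so an
   entry can be looked up with a stack-allocated pattern that only has its
   frame member set; see btrace_get_frame_function below.  */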
1551
1552 /* Create a new btrace frame cache. */
1553
1554 static struct btrace_frame_cache *
1555 bfcache_new (struct frame_info *frame)
1556 {
1557 struct btrace_frame_cache *cache;
1558 void **slot;
1559
1560 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1561 cache->frame = frame;
1562
1563 slot = htab_find_slot (bfcache, cache, INSERT);
1564 gdb_assert (*slot == NULL);
1565 *slot = cache;
1566
1567 return cache;
1568 }
1569
1570 /* Extract the branch trace function from a branch trace frame. */
1571
1572 static const struct btrace_function *
1573 btrace_get_frame_function (struct frame_info *frame)
1574 {
1575 const struct btrace_frame_cache *cache;
1576 const struct btrace_function *bfun;
1577 struct btrace_frame_cache pattern;
1578 void **slot;
1579
1580 pattern.frame = frame;
1581
1582 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1583 if (slot == NULL)
1584 return NULL;
1585
1586 cache = (const struct btrace_frame_cache *) *slot;
1587 return cache->bfun;
1588 }
1589
1590 /* Implement stop_reason method for record_btrace_frame_unwind. */
1591
1592 static enum unwind_stop_reason
1593 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1594 void **this_cache)
1595 {
1596 const struct btrace_frame_cache *cache;
1597 const struct btrace_function *bfun;
1598
1599 cache = (const struct btrace_frame_cache *) *this_cache;
1600 bfun = cache->bfun;
1601 gdb_assert (bfun != NULL);
1602
1603 if (bfun->up == NULL)
1604 return UNWIND_UNAVAILABLE;
1605
1606 return UNWIND_NO_REASON;
1607 }
1608
1609 /* Implement this_id method for record_btrace_frame_unwind. */
1610
1611 static void
1612 record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1613 struct frame_id *this_id)
1614 {
1615 const struct btrace_frame_cache *cache;
1616 const struct btrace_function *bfun;
1617 CORE_ADDR code, special;
1618
1619 cache = (const struct btrace_frame_cache *) *this_cache;
1620
1621 bfun = cache->bfun;
1622 gdb_assert (bfun != NULL);
1623
1624 while (bfun->segment.prev != NULL)
1625 bfun = bfun->segment.prev;
1626
1627 code = get_frame_func (this_frame);
1628 special = bfun->number;
1629
1630 *this_id = frame_id_build_unavailable_stack_special (code, special);
1631
1632 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1633 btrace_get_bfun_name (cache->bfun),
1634 core_addr_to_string_nz (this_id->code_addr),
1635 core_addr_to_string_nz (this_id->special_addr));
1636 }
1637
1638 /* Implement prev_register method for record_btrace_frame_unwind. */
1639
1640 static struct value *
1641 record_btrace_frame_prev_register (struct frame_info *this_frame,
1642 void **this_cache,
1643 int regnum)
1644 {
1645 const struct btrace_frame_cache *cache;
1646 const struct btrace_function *bfun, *caller;
1647 const struct btrace_insn *insn;
1648 struct gdbarch *gdbarch;
1649 CORE_ADDR pc;
1650 int pcreg;
1651
1652 gdbarch = get_frame_arch (this_frame);
1653 pcreg = gdbarch_pc_regnum (gdbarch);
1654 if (pcreg < 0 || regnum != pcreg)
1655 throw_error (NOT_AVAILABLE_ERROR,
1656 _("Registers are not available in btrace record history"));
1657
1658 cache = (const struct btrace_frame_cache *) *this_cache;
1659 bfun = cache->bfun;
1660 gdb_assert (bfun != NULL);
1661
1662 caller = bfun->up;
1663 if (caller == NULL)
1664 throw_error (NOT_AVAILABLE_ERROR,
1665 _("No caller in btrace record history"));
1666
1667 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1668 {
1669 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1670 pc = insn->pc;
1671 }
1672 else
1673 {
1674 insn = VEC_last (btrace_insn_s, caller->insn);
1675 pc = insn->pc;
1676
1677 pc += gdb_insn_length (gdbarch, pc);
1678 }
1679
1680 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1681 btrace_get_bfun_name (bfun), bfun->level,
1682 core_addr_to_string_nz (pc));
1683
1684 return frame_unwind_got_address (this_frame, regnum, pc);
1685 }
1686
1687 /* Implement sniffer method for record_btrace_frame_unwind. */
1688
1689 static int
1690 record_btrace_frame_sniffer (const struct frame_unwind *self,
1691 struct frame_info *this_frame,
1692 void **this_cache)
1693 {
1694 const struct btrace_function *bfun;
1695 struct btrace_frame_cache *cache;
1696 struct thread_info *tp;
1697 struct frame_info *next;
1698
1699 /* THIS_FRAME does not contain a reference to its thread. */
1700 tp = find_thread_ptid (inferior_ptid);
1701 gdb_assert (tp != NULL);
1702
1703 bfun = NULL;
1704 next = get_next_frame (this_frame);
1705 if (next == NULL)
1706 {
1707 const struct btrace_insn_iterator *replay;
1708
1709 replay = tp->btrace.replay;
1710 if (replay != NULL)
1711 bfun = replay->function;
1712 }
1713 else
1714 {
1715 const struct btrace_function *callee;
1716
1717 callee = btrace_get_frame_function (next);
1718 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1719 bfun = callee->up;
1720 }
1721
1722 if (bfun == NULL)
1723 return 0;
1724
1725 DEBUG ("[frame] sniffed frame for %s on level %d",
1726 btrace_get_bfun_name (bfun), bfun->level);
1727
1728 /* This is our frame. Initialize the frame cache. */
1729 cache = bfcache_new (this_frame);
1730 cache->tp = tp;
1731 cache->bfun = bfun;
1732
1733 *this_cache = cache;
1734 return 1;
1735 }
1736
1737 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1738
1739 static int
1740 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1741 struct frame_info *this_frame,
1742 void **this_cache)
1743 {
1744 const struct btrace_function *bfun, *callee;
1745 struct btrace_frame_cache *cache;
1746 struct frame_info *next;
1747
1748 next = get_next_frame (this_frame);
1749 if (next == NULL)
1750 return 0;
1751
1752 callee = btrace_get_frame_function (next);
1753 if (callee == NULL)
1754 return 0;
1755
1756 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1757 return 0;
1758
1759 bfun = callee->up;
1760 if (bfun == NULL)
1761 return 0;
1762
1763 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1764 btrace_get_bfun_name (bfun), bfun->level);
1765
1766 /* This is our frame. Initialize the frame cache. */
1767 cache = bfcache_new (this_frame);
1768 cache->tp = find_thread_ptid (inferior_ptid);
1769 cache->bfun = bfun;
1770
1771 *this_cache = cache;
1772 return 1;
1773 }
1774
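/* Implement the dealloc_cache method shared by record_btrace_frame_unwind
   and record_btrace_tailcall_frame_unwind below.  */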
1775 static void
1776 record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1777 {
1778 struct btrace_frame_cache *cache;
1779 void **slot;
1780
1781 cache = (struct btrace_frame_cache *) this_cache;
1782
1783 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1784 gdb_assert (slot != NULL);
1785
1786 htab_remove_elt (bfcache, cache);
1787 }
1788
1789 /* btrace recording stores neither previous memory content nor the content
1790 of stack frames.  Any unwinding would return erroneous results, as the
1791 stack contents no longer match the changed PC value restored from history.
1792 Therefore this unwinder reports any possibly unwound registers as
1793 <unavailable>. */
1794
1795 const struct frame_unwind record_btrace_frame_unwind =
1796 {
1797 NORMAL_FRAME,
1798 record_btrace_frame_unwind_stop_reason,
1799 record_btrace_frame_this_id,
1800 record_btrace_frame_prev_register,
1801 NULL,
1802 record_btrace_frame_sniffer,
1803 record_btrace_frame_dealloc_cache
1804 };
1805
1806 const struct frame_unwind record_btrace_tailcall_frame_unwind =
1807 {
1808 TAILCALL_FRAME,
1809 record_btrace_frame_unwind_stop_reason,
1810 record_btrace_frame_this_id,
1811 record_btrace_frame_prev_register,
1812 NULL,
1813 record_btrace_tailcall_frame_sniffer,
1814 record_btrace_frame_dealloc_cache
1815 };
1816
1817 /* Implement the to_get_unwinder method. */
1818
1819 static const struct frame_unwind *
1820 record_btrace_to_get_unwinder (struct target_ops *self)
1821 {
1822 return &record_btrace_frame_unwind;
1823 }
1824
1825 /* Implement the to_get_tailcall_unwinder method. */
1826
1827 static const struct frame_unwind *
1828 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1829 {
1830 return &record_btrace_tailcall_frame_unwind;
1831 }
1832
1833 /* Return a human-readable string for FLAG. */
1834
1835 static const char *
1836 btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1837 {
1838 switch (flag)
1839 {
1840 case BTHR_STEP:
1841 return "step";
1842
1843 case BTHR_RSTEP:
1844 return "reverse-step";
1845
1846 case BTHR_CONT:
1847 return "cont";
1848
1849 case BTHR_RCONT:
1850 return "reverse-cont";
1851
1852 case BTHR_STOP:
1853 return "stop";
1854 }
1855
1856 return "<invalid>";
1857 }
1858
1859 /* Indicate that TP should be resumed according to FLAG. */
1860
1861 static void
1862 record_btrace_resume_thread (struct thread_info *tp,
1863 enum btrace_thread_flag flag)
1864 {
1865 struct btrace_thread_info *btinfo;
1866
1867 DEBUG ("resuming thread %d (%s): %x (%s)", tp->num,
1868 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1869
1870 btinfo = &tp->btrace;
1871
1872 /* Fetch the latest branch trace. */
1873 btrace_fetch (tp);
1874
1875 /* A resume request overwrites a preceding resume or stop request. */
1876 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1877 btinfo->flags |= flag;
1878 }
1879
1880 /* Get the current frame for TP. */
1881
1882 static struct frame_info *
1883 get_thread_current_frame (struct thread_info *tp)
1884 {
1885 struct frame_info *frame;
1886 ptid_t old_inferior_ptid;
1887 int executing;
1888
1889 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1890 old_inferior_ptid = inferior_ptid;
1891 inferior_ptid = tp->ptid;
1892
1893 /* Clear the executing flag to allow changes to the current frame.
1894 We are not actually running, yet. We just started a reverse execution
1895 command or a record goto command.
1896 For the latter, EXECUTING is false and this has no effect.
1897 For the former, EXECUTING is true and we're in to_wait, about to
1898 move the thread. Since we need to recompute the stack, we temporarily
1899 set EXECUTING to false. */
1900 executing = is_executing (inferior_ptid);
1901 set_executing (inferior_ptid, 0);
1902
1903 frame = NULL;
1904 TRY
1905 {
1906 frame = get_current_frame ();
1907 }
1908 CATCH (except, RETURN_MASK_ALL)
1909 {
1910 /* Restore the previous execution state. */
1911 set_executing (inferior_ptid, executing);
1912
1913 /* Restore the previous inferior_ptid. */
1914 inferior_ptid = old_inferior_ptid;
1915
1916 throw_exception (except);
1917 }
1918 END_CATCH
1919
1920 /* Restore the previous execution state. */
1921 set_executing (inferior_ptid, executing);
1922
1923 /* Restore the previous inferior_ptid. */
1924 inferior_ptid = old_inferior_ptid;
1925
1926 return frame;
1927 }
1928
1929 /* Start replaying a thread. */
1930
1931 static struct btrace_insn_iterator *
1932 record_btrace_start_replaying (struct thread_info *tp)
1933 {
1934 struct btrace_insn_iterator *replay;
1935 struct btrace_thread_info *btinfo;
1936
1937 btinfo = &tp->btrace;
1938 replay = NULL;
1939
1940 /* We can't start replaying without trace. */
1941 if (btinfo->begin == NULL)
1942 return NULL;
1943
1944 /* GDB stores the current frame_id when stepping in order to detect steps
1945 into subroutines.
1946 Since frames are computed differently when we're replaying, we need to
1947 recompute those stored frames and fix them up so we can still detect
1948 subroutines after we started replaying. */
1949 TRY
1950 {
1951 struct frame_info *frame;
1952 struct frame_id frame_id;
1953 int upd_step_frame_id, upd_step_stack_frame_id;
1954
1955 /* The current frame without replaying - computed via normal unwind. */
1956 frame = get_thread_current_frame (tp);
1957 frame_id = get_frame_id (frame);
1958
1959 /* Check if we need to update any stepping-related frame id's. */
1960 upd_step_frame_id = frame_id_eq (frame_id,
1961 tp->control.step_frame_id);
1962 upd_step_stack_frame_id = frame_id_eq (frame_id,
1963 tp->control.step_stack_frame_id);
1964
1965 /* We start replaying at the end of the branch trace. This corresponds
1966 to the current instruction. */
1967 replay = XNEW (struct btrace_insn_iterator);
1968 btrace_insn_end (replay, btinfo);
1969
1970 /* Skip gaps at the end of the trace. */
1971 while (btrace_insn_get (replay) == NULL)
1972 {
1973 unsigned int steps;
1974
1975 steps = btrace_insn_prev (replay, 1);
1976 if (steps == 0)
1977 error (_("No trace."));
1978 }
1979
1980 /* We're not replaying, yet. */
1981 gdb_assert (btinfo->replay == NULL);
1982 btinfo->replay = replay;
1983
1984 /* Make sure we're not using any stale registers. */
1985 registers_changed_ptid (tp->ptid);
1986
1987 /* The current frame with replaying - computed via btrace unwind. */
1988 frame = get_thread_current_frame (tp);
1989 frame_id = get_frame_id (frame);
1990
1991 /* Replace stepping related frames where necessary. */
1992 if (upd_step_frame_id)
1993 tp->control.step_frame_id = frame_id;
1994 if (upd_step_stack_frame_id)
1995 tp->control.step_stack_frame_id = frame_id;
1996 }
1997 CATCH (except, RETURN_MASK_ALL)
1998 {
1999 xfree (btinfo->replay);
2000 btinfo->replay = NULL;
2001
2002 registers_changed_ptid (tp->ptid);
2003
2004 throw_exception (except);
2005 }
2006 END_CATCH
2007
2008 return replay;
2009 }
2010
2011 /* Stop replaying a thread. */
2012
2013 static void
2014 record_btrace_stop_replaying (struct thread_info *tp)
2015 {
2016 struct btrace_thread_info *btinfo;
2017
2018 btinfo = &tp->btrace;
2019
2020 xfree (btinfo->replay);
2021 btinfo->replay = NULL;
2022
2023 /* Make sure we're not leaving any stale registers. */
2024 registers_changed_ptid (tp->ptid);
2025 }
2026
2027 /* Stop replaying TP if it is at the end of its execution history. */
2028
2029 static void
2030 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2031 {
2032 struct btrace_insn_iterator *replay, end;
2033 struct btrace_thread_info *btinfo;
2034
2035 btinfo = &tp->btrace;
2036 replay = btinfo->replay;
2037
2038 if (replay == NULL)
2039 return;
2040
2041 btrace_insn_end (&end, btinfo);
2042
2043 if (btrace_insn_cmp (replay, &end) == 0)
2044 record_btrace_stop_replaying (tp);
2045 }
2046
2047 /* The to_resume method of target record-btrace. */
2048
2049 static void
2050 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2051 enum gdb_signal signal)
2052 {
2053 struct thread_info *tp;
2054 enum btrace_thread_flag flag, cflag;
2055
2056 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2057 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2058 step ? "step" : "cont");
2059
2060 /* Store the execution direction of the last resume.
2061
2062 If there is more than one to_resume call, we have to rely on infrun
2063 to not change the execution direction in-between. */
2064 record_btrace_resume_exec_dir = execution_direction;
2065
2066 /* As long as we're not replaying, just forward the request.
2067
2068 For non-stop targets this means that no thread is replaying. In order to
2069 make progress, we may need to explicitly move replaying threads to the end
2070 of their execution history. */
2071 if ((execution_direction != EXEC_REVERSE)
2072 && !record_btrace_is_replaying (ops, minus_one_ptid))
2073 {
2074 ops = ops->beneath;
2075 ops->to_resume (ops, ptid, step, signal);
2076 return;
2077 }
2078
2079 /* Compute the btrace thread flag for the requested move. */
2080 if (execution_direction == EXEC_REVERSE)
2081 {
2082 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2083 cflag = BTHR_RCONT;
2084 }
2085 else
2086 {
2087 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2088 cflag = BTHR_CONT;
2089 }
2090
2091 /* We just indicate the resume intent here. The actual stepping happens in
2092 record_btrace_wait below.
2093
2094 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2095 if (!target_is_non_stop_p ())
2096 {
2097 gdb_assert (ptid_match (inferior_ptid, ptid));
2098
2099 ALL_NON_EXITED_THREADS (tp)
2100 if (ptid_match (tp->ptid, ptid))
2101 {
2102 if (ptid_match (tp->ptid, inferior_ptid))
2103 record_btrace_resume_thread (tp, flag);
2104 else
2105 record_btrace_resume_thread (tp, cflag);
2106 }
2107 }
2108 else
2109 {
2110 ALL_NON_EXITED_THREADS (tp)
2111 if (ptid_match (tp->ptid, ptid))
2112 record_btrace_resume_thread (tp, flag);
2113 }
2114
2115 /* Async support. */
2116 if (target_can_async_p ())
2117 {
2118 target_async (1);
2119 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2120 }
2121 }
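
/* For illustration, a session that exercises the resume/wait machinery
   (commands only; it assumes branch tracing hardware and elides all
   output):

     (gdb) record btrace
     (gdb) next
     (gdb) reverse-stepi    # starts replaying; resumed with BTHR_RSTEP
     (gdb) record goto end  # stops replaying at the live position  */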
2122
2123 /* Cancel resuming TP. */
2124
2125 static void
2126 record_btrace_cancel_resume (struct thread_info *tp)
2127 {
2128 enum btrace_thread_flag flags;
2129
2130 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2131 if (flags == 0)
2132 return;
2133
2134 DEBUG ("cancel resume thread %d (%s): %x (%s)", tp->num,
2135 target_pid_to_str (tp->ptid), flags,
2136 btrace_thread_flag_to_str (flags));
2137
2138 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2139 record_btrace_stop_replaying_at_end (tp);
2140 }
2141
2142 /* Return a target_waitstatus indicating that we ran out of history. */
2143
2144 static struct target_waitstatus
2145 btrace_step_no_history (void)
2146 {
2147 struct target_waitstatus status;
2148
2149 status.kind = TARGET_WAITKIND_NO_HISTORY;
2150
2151 return status;
2152 }
2153
2154 /* Return a target_waitstatus indicating that a step finished. */
2155
2156 static struct target_waitstatus
2157 btrace_step_stopped (void)
2158 {
2159 struct target_waitstatus status;
2160
2161 status.kind = TARGET_WAITKIND_STOPPED;
2162 status.value.sig = GDB_SIGNAL_TRAP;
2163
2164 return status;
2165 }
2166
2167 /* Return a target_waitstatus indicating that a thread was stopped as
2168 requested. */
2169
2170 static struct target_waitstatus
2171 btrace_step_stopped_on_request (void)
2172 {
2173 struct target_waitstatus status;
2174
2175 status.kind = TARGET_WAITKIND_STOPPED;
2176 status.value.sig = GDB_SIGNAL_0;
2177
2178 return status;
2179 }
2180
2181 /* Return a target_waitstatus indicating a spurious stop. */
2182
2183 static struct target_waitstatus
2184 btrace_step_spurious (void)
2185 {
2186 struct target_waitstatus status;
2187
2188 status.kind = TARGET_WAITKIND_SPURIOUS;
2189
2190 return status;
2191 }
2192
2193 /* Return a target_waitstatus indicating that the thread was not resumed. */
2194
2195 static struct target_waitstatus
2196 btrace_step_no_resumed (void)
2197 {
2198 struct target_waitstatus status;
2199
2200 status.kind = TARGET_WAITKIND_NO_RESUMED;
2201
2202 return status;
2203 }
2204
2205 /* Return a target_waitstatus indicating that we should wait again. */
2206
2207 static struct target_waitstatus
2208 btrace_step_again (void)
2209 {
2210 struct target_waitstatus status;
2211
2212 status.kind = TARGET_WAITKIND_IGNORE;
2213
2214 return status;
2215 }
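
/* Together, the helpers above cover all wait statuses this target reports:
   NO_HISTORY, STOPPED with SIGTRAP, STOPPED with GDB_SIGNAL_0, SPURIOUS,
   NO_RESUMED, and IGNORE for "step again". */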
2216
2217 /* Clear the record histories. */
2218
2219 static void
2220 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2221 {
2222 xfree (btinfo->insn_history);
2223 xfree (btinfo->call_history);
2224
2225 btinfo->insn_history = NULL;
2226 btinfo->call_history = NULL;
2227 }
2228
2229 /* Check whether TP's current replay position is at a breakpoint. */
2230
2231 static int
2232 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2233 {
2234 struct btrace_insn_iterator *replay;
2235 struct btrace_thread_info *btinfo;
2236 const struct btrace_insn *insn;
2237 struct inferior *inf;
2238
2239 btinfo = &tp->btrace;
2240 replay = btinfo->replay;
2241
2242 if (replay == NULL)
2243 return 0;
2244
2245 insn = btrace_insn_get (replay);
2246 if (insn == NULL)
2247 return 0;
2248
2249 inf = find_inferior_ptid (tp->ptid);
2250 if (inf == NULL)
2251 return 0;
2252
2253 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2254 &btinfo->stop_reason);
2255 }
2256
2257 /* Step one instruction in the forward direction. */
2258
2259 static struct target_waitstatus
2260 record_btrace_single_step_forward (struct thread_info *tp)
2261 {
2262 struct btrace_insn_iterator *replay, end;
2263 struct btrace_thread_info *btinfo;
2264
2265 btinfo = &tp->btrace;
2266 replay = btinfo->replay;
2267
2268 /* We're done if we're not replaying. */
2269 if (replay == NULL)
2270 return btrace_step_no_history ();
2271
2272 /* Check if we're stepping a breakpoint. */
2273 if (record_btrace_replay_at_breakpoint (tp))
2274 return btrace_step_stopped ();
2275
2276 /* Skip gaps during replay. */
2277 do
2278 {
2279 unsigned int steps;
2280
2281 /* We will bail out here if we continue stepping after reaching the end
2282 of the execution history. */
2283 steps = btrace_insn_next (replay, 1);
2284 if (steps == 0)
2285 return btrace_step_no_history ();
2286 }
2287 while (btrace_insn_get (replay) == NULL);
2288
2289 /* Determine the end of the instruction trace. */
2290 btrace_insn_end (&end, btinfo);
2291
2292 /* The execution trace contains (and ends with) the current instruction.
2293 This instruction has not been executed yet, so the trace really ends
2294 one instruction earlier. */
2295 if (btrace_insn_cmp (replay, &end) == 0)
2296 return btrace_step_no_history ();
2297
2298 return btrace_step_spurious ();
2299 }
2300
2301 /* Step one instruction in the backward direction. */
2302
2303 static struct target_waitstatus
2304 record_btrace_single_step_backward (struct thread_info *tp)
2305 {
2306 struct btrace_insn_iterator *replay;
2307 struct btrace_thread_info *btinfo;
2308
2309 btinfo = &tp->btrace;
2310 replay = btinfo->replay;
2311
2312 /* Start replaying if we're not already doing so. */
2313 if (replay == NULL)
2314 replay = record_btrace_start_replaying (tp);
2315
2316 /* If we can't step any further, we reached the end of the history.
2317 Skip gaps during replay. */
2318 do
2319 {
2320 unsigned int steps;
2321
2322 steps = btrace_insn_prev (replay, 1);
2323 if (steps == 0)
2324 return btrace_step_no_history ();
2325 }
2326 while (btrace_insn_get (replay) == NULL);
2327
2328 /* Check if we're stepping a breakpoint.
2329
2330 For reverse-stepping, this check is after the step. There is logic in
2331 infrun.c that handles reverse-stepping separately. See, for example,
2332 proceed and adjust_pc_after_break.
2333
2334 This code assumes that for reverse-stepping, PC points to the last
2335 de-executed instruction, whereas for forward-stepping PC points to the
2336 next to-be-executed instruction. */
2337 if (record_btrace_replay_at_breakpoint (tp))
2338 return btrace_step_stopped ();
2339
2340 return btrace_step_spurious ();
2341 }
2342
2343 /* Step a single thread. */
2344
2345 static struct target_waitstatus
2346 record_btrace_step_thread (struct thread_info *tp)
2347 {
2348 struct btrace_thread_info *btinfo;
2349 struct target_waitstatus status;
2350 enum btrace_thread_flag flags;
2351
2352 btinfo = &tp->btrace;
2353
2354 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2355 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2356
2357 DEBUG ("stepping thread %d (%s): %x (%s)", tp->num,
2358 target_pid_to_str (tp->ptid), flags,
2359 btrace_thread_flag_to_str (flags));
2360
2361 /* We can't step without an execution history. */
2362 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2363 return btrace_step_no_history ();
2364
2365 switch (flags)
2366 {
2367 default:
2368 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2369
2370 case BTHR_STOP:
2371 return btrace_step_stopped_on_request ();
2372
2373 case BTHR_STEP:
2374 status = record_btrace_single_step_forward (tp);
2375 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2376 break;
2377
2378 return btrace_step_stopped ();
2379
2380 case BTHR_RSTEP:
2381 status = record_btrace_single_step_backward (tp);
2382 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2383 break;
2384
2385 return btrace_step_stopped ();
2386
2387 case BTHR_CONT:
2388 status = record_btrace_single_step_forward (tp);
2389 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2390 break;
2391
2392 btinfo->flags |= flags;
2393 return btrace_step_again ();
2394
2395 case BTHR_RCONT:
2396 status = record_btrace_single_step_backward (tp);
2397 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2398 break;
2399
2400 btinfo->flags |= flags;
2401 return btrace_step_again ();
2402 }
2403
2404 /* We keep threads moving at the end of their execution history. The to_wait
2405 method will stop the thread for which the event is reported. */
2406 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2407 btinfo->flags |= flags;
2408
2409 return status;
2410 }
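
/* To restate the stepping protocol above: BTHR_STEP and BTHR_RSTEP report a
   stop after a single instruction, while BTHR_CONT and BTHR_RCONT re-arm
   themselves via TARGET_WAITKIND_IGNORE and keep moving until they hit a
   breakpoint or run out of execution history. */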
2411
2412 /* A vector of threads. */
2413
2414 typedef struct thread_info * tp_t;
2415 DEF_VEC_P (tp_t);
2416
2417 /* Announce further events if necessary. */
2418
2419 static void
2420 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2421 const VEC (tp_t) *no_history)
2422 {
2423 int more_moving, more_no_history;
2424
2425 more_moving = !VEC_empty (tp_t, moving);
2426 more_no_history = !VEC_empty (tp_t, no_history);
2427
2428 if (!more_moving && !more_no_history)
2429 return;
2430
2431 if (more_moving)
2432 DEBUG ("movers pending");
2433
2434 if (more_no_history)
2435 DEBUG ("no-history pending");
2436
2437 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2438 }
2439
2440 /* The to_wait method of target record-btrace. */
2441
2442 static ptid_t
2443 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2444 struct target_waitstatus *status, int options)
2445 {
2446 VEC (tp_t) *moving, *no_history;
2447 struct thread_info *tp, *eventing;
2448 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2449
2450 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2451
2452 /* As long as we're not replaying, just forward the request. */
2453 if ((execution_direction != EXEC_REVERSE)
2454 && !record_btrace_is_replaying (ops, minus_one_ptid))
2455 {
2456 ops = ops->beneath;
2457 return ops->to_wait (ops, ptid, status, options);
2458 }
2459
2460 moving = NULL;
2461 no_history = NULL;
2462
2463 make_cleanup (VEC_cleanup (tp_t), &moving);
2464 make_cleanup (VEC_cleanup (tp_t), &no_history);
2465
2466 /* Keep a work list of moving threads. */
2467 ALL_NON_EXITED_THREADS (tp)
2468 if (ptid_match (tp->ptid, ptid)
2469 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2470 VEC_safe_push (tp_t, moving, tp);
2471
2472 if (VEC_empty (tp_t, moving))
2473 {
2474 *status = btrace_step_no_resumed ();
2475
2476 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2477 target_waitstatus_to_string (status));
2478
2479 do_cleanups (cleanups);
2480 return null_ptid;
2481 }
2482
2483 /* Step moving threads one by one, one step each, until either one thread
2484 reports an event or we run out of threads to step.
2485
2486 When stepping more than one thread, chances are that some threads reach
2487 the end of their execution history earlier than others. If we reported
2488 this immediately, all-stop on top of non-stop would stop all threads and
2489 resume the same threads next time. And we would report the same thread
2490 having reached the end of its execution history again.
2491
2492 In the worst case, this would starve the other threads. But even if other
2493 threads were allowed to make progress, this would result in far too
2494 many intermediate stops.
2495
2496 We therefore delay the reporting of "no execution history" until we have
2497 nothing else to report. By this time, all threads should have moved to
2498 either the beginning or the end of their execution history. There will
2499 be a single user-visible stop. */
2500 eventing = NULL;
2501 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2502 {
2503 unsigned int ix;
2504
2505 ix = 0;
2506 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2507 {
2508 *status = record_btrace_step_thread (tp);
2509
2510 switch (status->kind)
2511 {
2512 case TARGET_WAITKIND_IGNORE:
2513 ix++;
2514 break;
2515
2516 case TARGET_WAITKIND_NO_HISTORY:
2517 VEC_safe_push (tp_t, no_history,
2518 VEC_ordered_remove (tp_t, moving, ix));
2519 break;
2520
2521 default:
2522 eventing = VEC_unordered_remove (tp_t, moving, ix);
2523 break;
2524 }
2525 }
2526 }
2527
2528 if (eventing == NULL)
2529 {
2530 /* We started with at least one moving thread. This thread must have
2531 either stopped or reached the end of its execution history.
2532
2533 In the former case, EVENTING must not be NULL.
2534 In the latter case, NO_HISTORY must not be empty. */
2535 gdb_assert (!VEC_empty (tp_t, no_history));
2536
2537 /* We kept threads moving at the end of their execution history. Stop
2538 EVENTING now that we are going to report its stop. */
2539 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2540 eventing->btrace.flags &= ~BTHR_MOVE;
2541
2542 *status = btrace_step_no_history ();
2543 }
2544
2545 gdb_assert (eventing != NULL);
2546
2547 /* We kept threads replaying at the end of their execution history. Stop
2548 replaying EVENTING now that we are going to report its stop. */
2549 record_btrace_stop_replaying_at_end (eventing);
2550
2551 /* Stop all other threads. */
2552 if (!target_is_non_stop_p ())
2553 ALL_NON_EXITED_THREADS (tp)
2554 record_btrace_cancel_resume (tp);
2555
2556 /* In async mode, we need to announce further events. */
2557 if (target_is_async_p ())
2558 record_btrace_maybe_mark_async_event (moving, no_history);
2559
2560 /* Start record histories anew from the current position. */
2561 record_btrace_clear_histories (&eventing->btrace);
2562
2563 /* We moved the replay position but did not update registers. */
2564 registers_changed_ptid (eventing->ptid);
2565
2566 DEBUG ("wait ended by thread %d (%s): %s", eventing->num,
2567 target_pid_to_str (eventing->ptid),
2568 target_waitstatus_to_string (status));
2569
2570 do_cleanups (cleanups);
2571 return eventing->ptid;
2572 }
2573
2574 /* The to_stop method of target record-btrace. */
2575
2576 static void
2577 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2578 {
2579 DEBUG ("stop %s", target_pid_to_str (ptid));
2580
2581 /* As long as we're not replaying, just forward the request. */
2582 if ((execution_direction != EXEC_REVERSE)
2583 && !record_btrace_is_replaying (ops, minus_one_ptid))
2584 {
2585 ops = ops->beneath;
2586 ops->to_stop (ops, ptid);
2587 }
2588 else
2589 {
2590 struct thread_info *tp;
2591
2592 ALL_NON_EXITED_THREADS (tp)
2593 if (ptid_match (tp->ptid, ptid))
2594 {
2595 tp->btrace.flags &= ~BTHR_MOVE;
2596 tp->btrace.flags |= BTHR_STOP;
2597 }
2598 }
2599 }
2600
2601 /* The to_can_execute_reverse method of target record-btrace. */
2602
2603 static int
2604 record_btrace_can_execute_reverse (struct target_ops *self)
2605 {
2606 return 1;
2607 }
2608
2609 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2610
2611 static int
2612 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2613 {
2614 if (record_btrace_is_replaying (ops, minus_one_ptid))
2615 {
2616 struct thread_info *tp = inferior_thread ();
2617
2618 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2619 }
2620
2621 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2622 }
2623
2624 /* The to_supports_stopped_by_sw_breakpoint method of target
2625 record-btrace. */
2626
2627 static int
2628 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2629 {
2630 if (record_btrace_is_replaying (ops, minus_one_ptid))
2631 return 1;
2632
2633 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2634 }
2635
2636 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2637
2638 static int
2639 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2640 {
2641 if (record_btrace_is_replaying (ops, minus_one_ptid))
2642 {
2643 struct thread_info *tp = inferior_thread ();
2644
2645 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2646 }
2647
2648 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2649 }
2650
2651 /* The to_supports_stopped_by_hw_breakpoint method of target
2652 record-btrace. */
2653
2654 static int
2655 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2656 {
2657 if (record_btrace_is_replaying (ops, minus_one_ptid))
2658 return 1;
2659
2660 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2661 }
2662
2663 /* The to_update_thread_list method of target record-btrace. */
2664
2665 static void
2666 record_btrace_update_thread_list (struct target_ops *ops)
2667 {
2668 /* We don't add or remove threads during replay. */
2669 if (record_btrace_is_replaying (ops, minus_one_ptid))
2670 return;
2671
2672 /* Forward the request. */
2673 ops = ops->beneath;
2674 ops->to_update_thread_list (ops);
2675 }
2676
2677 /* The to_thread_alive method of target record-btrace. */
2678
2679 static int
2680 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2681 {
2682 /* We don't add or remove threads during replay. */
2683 if (record_btrace_is_replaying (ops, minus_one_ptid))
2684 return find_thread_ptid (ptid) != NULL;
2685
2686 /* Forward the request. */
2687 ops = ops->beneath;
2688 return ops->to_thread_alive (ops, ptid);
2689 }
2690
2691 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2692 is stopped. */
2693
2694 static void
2695 record_btrace_set_replay (struct thread_info *tp,
2696 const struct btrace_insn_iterator *it)
2697 {
2698 struct btrace_thread_info *btinfo;
2699
2700 btinfo = &tp->btrace;
2701
2702 if (it == NULL || it->function == NULL)
2703 record_btrace_stop_replaying (tp);
2704 else
2705 {
2706 if (btinfo->replay == NULL)
2707 record_btrace_start_replaying (tp);
2708 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2709 return;
2710
2711 *btinfo->replay = *it;
2712 registers_changed_ptid (tp->ptid);
2713 }
2714
2715 /* Start anew from the new replay position. */
2716 record_btrace_clear_histories (btinfo);
2717
2718 stop_pc = regcache_read_pc (get_current_regcache ());
2719 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2720 }
2721
2722 /* The to_goto_record_begin method of target record-btrace. */
2723
2724 static void
2725 record_btrace_goto_begin (struct target_ops *self)
2726 {
2727 struct thread_info *tp;
2728 struct btrace_insn_iterator begin;
2729
2730 tp = require_btrace_thread ();
2731
2732 btrace_insn_begin (&begin, &tp->btrace);
2733 record_btrace_set_replay (tp, &begin);
2734 }
2735
2736 /* The to_goto_record_end method of target record-btrace. */
2737
2738 static void
2739 record_btrace_goto_end (struct target_ops *ops)
2740 {
2741 struct thread_info *tp;
2742
2743 tp = require_btrace_thread ();
2744
2745 record_btrace_set_replay (tp, NULL);
2746 }
2747
2748 /* The to_goto_record method of target record-btrace. */
2749
2750 static void
2751 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2752 {
2753 struct thread_info *tp;
2754 struct btrace_insn_iterator it;
2755 unsigned int number;
2756 int found;
2757
2758 number = insn;
2759
2760 /* Check for wrap-arounds. */
2761 if (number != insn)
2762 error (_("Instruction number out of range."));
2763
2764 tp = require_btrace_thread ();
2765
2766 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2767 if (found == 0)
2768 error (_("No such instruction."));
2769
2770 record_btrace_set_replay (tp, &it);
2771 }
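
/* For illustration (assuming instruction number 42 exists in the recorded
   trace):

     (gdb) record goto 42

   replays from instruction 42 as numbered by "record instruction-history". */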
2772
2773 /* The to_record_stop_replaying method of target record-btrace. */
2774
2775 static void
2776 record_btrace_stop_replaying_all (struct target_ops *self)
2777 {
2778 struct thread_info *tp;
2779
2780 ALL_NON_EXITED_THREADS (tp)
2781 record_btrace_stop_replaying (tp);
2782 }
2783
2784 /* The to_execution_direction target method. */
2785
2786 static enum exec_direction_kind
2787 record_btrace_execution_direction (struct target_ops *self)
2788 {
2789 return record_btrace_resume_exec_dir;
2790 }
2791
2792 /* The to_prepare_to_generate_core target method. */
2793
2794 static void
2795 record_btrace_prepare_to_generate_core (struct target_ops *self)
2796 {
2797 record_btrace_generating_corefile = 1;
2798 }
2799
2800 /* The to_done_generating_core target method. */
2801
2802 static void
2803 record_btrace_done_generating_core (struct target_ops *self)
2804 {
2805 record_btrace_generating_corefile = 0;
2806 }
2807
2808 /* Initialize the record-btrace target ops. */
2809
2810 static void
2811 init_record_btrace_ops (void)
2812 {
2813 struct target_ops *ops;
2814
2815 ops = &record_btrace_ops;
2816 ops->to_shortname = "record-btrace";
2817 ops->to_longname = "Branch tracing target";
2818 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2819 ops->to_open = record_btrace_open;
2820 ops->to_close = record_btrace_close;
2821 ops->to_async = record_btrace_async;
2822 ops->to_detach = record_detach;
2823 ops->to_disconnect = record_disconnect;
2824 ops->to_mourn_inferior = record_mourn_inferior;
2825 ops->to_kill = record_kill;
2826 ops->to_stop_recording = record_btrace_stop_recording;
2827 ops->to_info_record = record_btrace_info;
2828 ops->to_insn_history = record_btrace_insn_history;
2829 ops->to_insn_history_from = record_btrace_insn_history_from;
2830 ops->to_insn_history_range = record_btrace_insn_history_range;
2831 ops->to_call_history = record_btrace_call_history;
2832 ops->to_call_history_from = record_btrace_call_history_from;
2833 ops->to_call_history_range = record_btrace_call_history_range;
2834 ops->to_record_is_replaying = record_btrace_is_replaying;
2835 ops->to_record_will_replay = record_btrace_will_replay;
2836 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2837 ops->to_xfer_partial = record_btrace_xfer_partial;
2838 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2839 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2840 ops->to_fetch_registers = record_btrace_fetch_registers;
2841 ops->to_store_registers = record_btrace_store_registers;
2842 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2843 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2844 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2845 ops->to_resume = record_btrace_resume;
2846 ops->to_wait = record_btrace_wait;
2847 ops->to_stop = record_btrace_stop;
2848 ops->to_update_thread_list = record_btrace_update_thread_list;
2849 ops->to_thread_alive = record_btrace_thread_alive;
2850 ops->to_goto_record_begin = record_btrace_goto_begin;
2851 ops->to_goto_record_end = record_btrace_goto_end;
2852 ops->to_goto_record = record_btrace_goto;
2853 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2854 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2855 ops->to_supports_stopped_by_sw_breakpoint
2856 = record_btrace_supports_stopped_by_sw_breakpoint;
2857 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2858 ops->to_supports_stopped_by_hw_breakpoint
2859 = record_btrace_supports_stopped_by_hw_breakpoint;
2860 ops->to_execution_direction = record_btrace_execution_direction;
2861 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2862 ops->to_done_generating_core = record_btrace_done_generating_core;
2863 ops->to_stratum = record_stratum;
2864 ops->to_magic = OPS_MAGIC;
2865 }
2866
2867 /* Start recording in BTS format. */
2868
2869 static void
2870 cmd_record_btrace_bts_start (char *args, int from_tty)
2871 {
2872 if (args != NULL && *args != 0)
2873 error (_("Invalid argument."));
2874
2875 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2876
2877 TRY
2878 {
2879 execute_command ("target record-btrace", from_tty);
2880 }
2881 CATCH (exception, RETURN_MASK_ALL)
2882 {
2883 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2884 throw_exception (exception);
2885 }
2886 END_CATCH
2887 }
2888
2889 /* Start recording in Intel(R) Processor Trace format. */
2890
2891 static void
2892 cmd_record_btrace_pt_start (char *args, int from_tty)
2893 {
2894 if (args != NULL && *args != 0)
2895 error (_("Invalid argument."));
2896
2897 record_btrace_conf.format = BTRACE_FORMAT_PT;
2898
2899 TRY
2900 {
2901 execute_command ("target record-btrace", from_tty);
2902 }
2903 CATCH (exception, RETURN_MASK_ALL)
2904 {
2905 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2906 throw_exception (exception);
2907 }
2908 END_CATCH
2909 }
2910
2911 /* Alias for "target record": try Intel(R) Processor Trace format first, then fall back to BTS. */
2912
2913 static void
2914 cmd_record_btrace_start (char *args, int from_tty)
2915 {
2916 if (args != NULL && *args != 0)
2917 error (_("Invalid argument."));
2918
2919 record_btrace_conf.format = BTRACE_FORMAT_PT;
2920
2921 TRY
2922 {
2923 execute_command ("target record-btrace", from_tty);
2924 }
2925 CATCH (exception, RETURN_MASK_ALL)
2926 {
2927 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2928
2929 TRY
2930 {
2931 execute_command ("target record-btrace", from_tty);
2932 }
2933 CATCH (exception, RETURN_MASK_ALL)
2934 {
2935 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2936 throw_exception (exception);
2937 }
2938 END_CATCH
2939 }
2940 END_CATCH
2941 }
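
/* Note that "record btrace" without a format argument thus prefers
   Intel(R) Processor Trace and silently falls back to BTS if the former
   cannot be enabled. */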
2942
2943 /* The "set record btrace" command. */
2944
2945 static void
2946 cmd_set_record_btrace (char *args, int from_tty)
2947 {
2948 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2949 }
2950
2951 /* The "show record btrace" command. */
2952
2953 static void
2954 cmd_show_record_btrace (char *args, int from_tty)
2955 {
2956 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2957 }
2958
2959 /* The "show record btrace replay-memory-access" command. */
2960
2961 static void
2962 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2963 struct cmd_list_element *c, const char *value)
2964 {
2965 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2966 replay_memory_access);
2967 }
2968
2969 /* The "set record btrace bts" command. */
2970
2971 static void
2972 cmd_set_record_btrace_bts (char *args, int from_tty)
2973 {
2974 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2975 "by an appropriate subcommand.\n"));
2976 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2977 all_commands, gdb_stdout);
2978 }
2979
2980 /* The "show record btrace bts" command. */
2981
2982 static void
2983 cmd_show_record_btrace_bts (char *args, int from_tty)
2984 {
2985 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
2986 }
2987
2988 /* The "set record btrace pt" command. */
2989
2990 static void
2991 cmd_set_record_btrace_pt (char *args, int from_tty)
2992 {
2993 printf_unfiltered (_("\"set record btrace pt\" must be followed "
2994 "by an appropriate subcommand.\n"));
2995 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
2996 all_commands, gdb_stdout);
2997 }
2998
2999 /* The "show record btrace pt" command. */
3000
3001 static void
3002 cmd_show_record_btrace_pt (char *args, int from_tty)
3003 {
3004 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3005 }
3006
3007 /* The "record bts buffer-size" show value function. */
3008
3009 static void
3010 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3011 struct cmd_list_element *c,
3012 const char *value)
3013 {
3014 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3015 value);
3016 }
3017
3018 /* The "record pt buffer-size" show value function. */
3019
3020 static void
3021 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3022 struct cmd_list_element *c,
3023 const char *value)
3024 {
3025 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3026 value);
3027 }
3028
3029 void _initialize_record_btrace (void);
3030
3031 /* Initialize btrace commands. */
3032
3033 void
3034 _initialize_record_btrace (void)
3035 {
3036 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3037 _("Start branch trace recording."), &record_btrace_cmdlist,
3038 "record btrace ", 0, &record_cmdlist);
3039 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3040
3041 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3042 _("\
3043 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3044 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3045 This format may not be available on all processors."),
3046 &record_btrace_cmdlist);
3047 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3048
3049 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3050 _("\
3051 Start branch trace recording in Intel(R) Processor Trace format.\n\n\
3052 This format may not be available on all processors."),
3053 &record_btrace_cmdlist);
3054 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3055
3056 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3057 _("Set record options"), &set_record_btrace_cmdlist,
3058 "set record btrace ", 0, &set_record_cmdlist);
3059
3060 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3061 _("Show record options"), &show_record_btrace_cmdlist,
3062 "show record btrace ", 0, &show_record_cmdlist);
3063
3064 add_setshow_enum_cmd ("replay-memory-access", no_class,
3065 replay_memory_access_types, &replay_memory_access, _("\
3066 Set what memory accesses are allowed during replay."), _("\
3067 Show what memory accesses are allowed during replay."),
3068 _("Default is READ-ONLY.\n\n\
3069 The btrace record target does not trace data.\n\
3070 The memory therefore corresponds to the live target and not \
3071 to the current replay position.\n\n\
3072 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3073 When READ-WRITE, allow accesses to read-only and read-write memory during \
3074 replay."),
3075 NULL, cmd_show_replay_memory_access,
3076 &set_record_btrace_cmdlist,
3077 &show_record_btrace_cmdlist);
3078
3079 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3080 _("Set record btrace bts options"),
3081 &set_record_btrace_bts_cmdlist,
3082 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3083
3084 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3085 _("Show record btrace bts options"),
3086 &show_record_btrace_bts_cmdlist,
3087 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3088
3089 add_setshow_uinteger_cmd ("buffer-size", no_class,
3090 &record_btrace_conf.bts.size,
3091 _("Set the record/replay bts buffer size."),
3092 _("Show the record/replay bts buffer size."), _("\
3093 When starting recording, request a trace buffer of this size. \
3094 The actual buffer size may differ from the requested size. \
3095 Use \"info record\" to see the actual buffer size.\n\n\
3096 Bigger buffers allow longer recording but also take more time to process \
3097 the recorded execution trace.\n\n\
3098 The trace buffer size may not be changed while recording."), NULL,
3099 show_record_bts_buffer_size_value,
3100 &set_record_btrace_bts_cmdlist,
3101 &show_record_btrace_bts_cmdlist);
3102
3103 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3104 _("Set record btrace pt options"),
3105 &set_record_btrace_pt_cmdlist,
3106 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3107
3108 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3109 _("Show record btrace pt options"),
3110 &show_record_btrace_pt_cmdlist,
3111 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3112
3113 add_setshow_uinteger_cmd ("buffer-size", no_class,
3114 &record_btrace_conf.pt.size,
3115 _("Set the record/replay pt buffer size."),
3116 _("Show the record/replay pt buffer size."), _("\
3117 Bigger buffers allow longer recording but also take more time to process \
3118 the recorded execution.\n\
3119 The actual buffer size may differ from the requested size. Use \"info record\" \
3120 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3121 &set_record_btrace_pt_cmdlist,
3122 &show_record_btrace_pt_cmdlist);
3123
3124 init_record_btrace_ops ();
3125 add_target (&record_btrace_ops);
3126
3127 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3128 xcalloc, xfree);
3129
3130 record_btrace_conf.bts.size = 64 * 1024;
3131 record_btrace_conf.pt.size = 16 * 1024;
3132 }