/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2017 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "record.h"
#include "record-btrace.h"
#include "gdbthread.h"
#include "target.h"
#include "gdbcmd.h"
#include "disasm.h"
#include "observer.h"
#include "cli/cli-utils.h"
#include "source.h"
#include "ui-out.h"
#include "symtab.h"
#include "filenames.h"
#include "regcache.h"
#include "frame-unwind.h"
#include "hashtab.h"
#include "infrun.h"
#include "event-loop.h"
#include "inf-loop.h"
#include "vec.h"
#include <algorithm>

/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  */
static const char *replay_memory_access = replay_memory_access_read_only;
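
/* This setting is toggled by the user via "set record btrace
   replay-memory-access read-only|read-write", presumably registered on the
   set/show command lists declared below (the registration code is outside
   this excerpt).  In read-only mode, replaying threads refuse memory writes
   and reads of non-read-only sections; see record_btrace_xfer_partial.  */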

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
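
/* Illustrative example of the do ... while (0) idiom: the macro expands to a
   single statement, so it composes safely with unbraced if/else:

     if (from_tty)
       DEBUG ("open");
     else
       do_something_else ();

   Without the wrapper, the braces plus the caller's trailing semicolon
   would terminate the if branch early and the else would fail to parse (or
   bind to the wrong if).  */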


/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}

/* Update the branch trace for the current thread and return a pointer to its
   branch trace information struct.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct btrace_thread_info *
require_btrace (void)
{
  struct thread_info *tp;

  tp = require_btrace_thread ();

  return &tp->btrace;
}

/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      warning ("%s", error.message);
    }
  END_CATCH
}
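
/* TRY/CATCH/END_CATCH are GDB's exception-handling macros (see
   common/common-exceptions.h).  RETURN_MASK_ERROR catches errors raised via
   error () but lets quit requests (Ctrl-C) propagate.  */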

/* Callback function to disable branch tracing for one thread.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp = (struct thread_info *) arg;

  btrace_disable (tp);
}

/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}

/* Disable automatic tracing of new threads.  */

static void
record_btrace_auto_disable (void)
{
  /* The observer may already have been detached.  */
  if (record_btrace_thread_observer == NULL)
    return;

  DEBUG ("detach thread observer");

  observer_detach_new_thread (record_btrace_thread_observer);
  record_btrace_thread_observer = NULL;
}

/* The record-btrace async event handler function.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}

/* See record-btrace.h.  */

void
record_btrace_push_target (void)
{
  const char *format;

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  format = btrace_format_short_string (record_btrace_conf.format);
  observer_notify_record_changed (current_inferior (), 1, "btrace", format);
}

/* The to_open method of target record-btrace.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  gdb_assert (record_btrace_thread_observer == NULL);

  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_push_target ();

  discard_cleanups (disable_chain);
}
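
/* Usage sketch (based on the argument handling above): with no argument,
   tracing is enabled for all non-exited threads; an argument such as
   "1 3-5" is parsed by number_is_in_list and restricts tracing to the
   listed global thread numbers.  */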

/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}

/* The to_disconnect method of target record-btrace.  */

static void
record_btrace_disconnect (struct target_ops *self, const char *args,
			  int from_tty)
{
  struct target_ops *beneath = self->beneath;

  /* Do not stop recording, just clean up GDB side.  */
  unpush_target (self);

  /* Forward disconnect.  */
  beneath->to_disconnect (beneath, args, from_tty);
}

/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}

/* The to_async method of target record-btrace.  */

static void
record_btrace_async (struct target_ops *ops, int enable)
{
  if (enable)
    mark_async_event_handler (record_btrace_async_inferior_event_handler);
  else
    clear_async_event_handler (record_btrace_async_inferior_event_handler);

  ops->beneath->to_async (ops->beneath, enable);
}

/* Adjusts the size and returns a human-readable size suffix.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  unsigned int sz;

  sz = *size;

  if ((sz & ((1u << 30) - 1)) == 0)
    {
      *size = sz >> 30;
      return "GB";
    }
  else if ((sz & ((1u << 20) - 1)) == 0)
    {
      *size = sz >> 20;
      return "MB";
    }
  else if ((sz & ((1u << 10) - 1)) == 0)
    {
      *size = sz >> 10;
      return "kB";
    }
  else
    return "";
}
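
/* Worked example (illustrative): a size of 8388608 (8 << 20) is an exact
   multiple of 1 MB but not of 1 GB, so *size becomes 8 and "MB" is
   returned.  A size of 12345 matches none of the masks and is returned
   unchanged with an empty suffix.  */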

/* Print a BTS configuration.  */

static void
record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print an Intel Processor Trace configuration.  */

static void
record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
{
  const char *suffix;
  unsigned int size;

  size = conf->size;
  if (size > 0)
    {
      suffix = record_btrace_adjust_size (&size);
      printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
    }
}

/* Print a branch tracing configuration.  */

static void
record_btrace_print_conf (const struct btrace_config *conf)
{
  printf_unfiltered (_("Recording format: %s.\n"),
		     btrace_format_string (conf->format));

  switch (conf->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      record_btrace_print_bts_conf (&conf->bts);
      return;

    case BTRACE_FORMAT_PT:
      record_btrace_print_pt_conf (&conf->pt);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* The to_info_record method of target record-btrace.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  validate_registers_access ();

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      insns = btrace_insn_number (&insn);

      /* If the last instruction is not a gap, it is the current instruction
	 that is not actually part of the record.  */
      if (btrace_insn_get (&insn) != NULL)
	insns -= 1;

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %s (%s).\n"), insns, calls, gaps,
		     print_thread_id (tp), target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
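
/* Example output (illustrative; the format name comes from
   btrace_format_string and the thread/pid strings from the target):

     Recording format: Branch Trace Store.
     Buffer size: 64kB.
     Recorded 42 instructions in 3 functions (0 gaps) for thread 1 (...).  */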

/* Print a decode error.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr = btrace_decode_error (format, errcode);

  uiout->text (_("["));
  /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT.  */
  if (!(format == BTRACE_FORMAT_PT && errcode > 0))
    {
      uiout->text (_("decode error ("));
      uiout->field_int ("errcode", errcode);
      uiout->text (_("): "));
    }
  uiout->text (errstr);
  uiout->text (_("]\n"));
}

/* Print an unsigned int.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  uiout->field_fmt (fld, "%u", val);
}

/* A range of source lines.  */

struct btrace_line_range
{
  /* The symtab this line is from.  */
  struct symtab *symtab;

  /* The first line (inclusive).  */
  int begin;

  /* The last line (exclusive).  */
  int end;
};

/* Construct a line range.  */

static struct btrace_line_range
btrace_mk_line_range (struct symtab *symtab, int begin, int end)
{
  struct btrace_line_range range;

  range.symtab = symtab;
  range.begin = begin;
  range.end = end;

  return range;
}

/* Add a line to a line range.  */

static struct btrace_line_range
btrace_line_range_add (struct btrace_line_range range, int line)
{
  if (range.end <= range.begin)
    {
      /* This is the first entry.  */
      range.begin = line;
      range.end = line + 1;
    }
  else if (line < range.begin)
    range.begin = line;
  else if (range.end < line)
    range.end = line;

  return range;
}

/* Return non-zero if RANGE is empty, zero otherwise.  */

static int
btrace_line_range_is_empty (struct btrace_line_range range)
{
  return range.end <= range.begin;
}

/* Return non-zero if LHS contains RHS, zero otherwise.  */

static int
btrace_line_range_contains_range (struct btrace_line_range lhs,
				  struct btrace_line_range rhs)
{
  return ((lhs.symtab == rhs.symtab)
	  && (lhs.begin <= rhs.begin)
	  && (rhs.end <= lhs.end));
}

/* Find the line range associated with PC.  */

static struct btrace_line_range
btrace_find_line_range (CORE_ADDR pc)
{
  struct btrace_line_range range;
  struct linetable_entry *lines;
  struct linetable *ltable;
  struct symtab *symtab;
  int nlines, i;

  symtab = find_pc_line_symtab (pc);
  if (symtab == NULL)
    return btrace_mk_line_range (NULL, 0, 0);

  ltable = SYMTAB_LINETABLE (symtab);
  if (ltable == NULL)
    return btrace_mk_line_range (symtab, 0, 0);

  nlines = ltable->nitems;
  lines = ltable->item;
  if (nlines <= 0)
    return btrace_mk_line_range (symtab, 0, 0);

  range = btrace_mk_line_range (symtab, 0, 0);
  for (i = 0; i < nlines - 1; i++)
    {
      if ((lines[i].pc == pc) && (lines[i].line != 0))
	range = btrace_line_range_add (range, lines[i].line);
    }

  return range;
}

/* Print source lines in LINES to UIOUT.

   UI_ITEM_CHAIN is a cleanup chain for the last source line and the
   instructions corresponding to that source line.  When printing a new source
   line, we do the cleanups for the open chain and open a new cleanup chain
   for the new source line.  If the source line range in LINES is not empty,
   this function will leave the cleanup chain for the last printed source line
   open so instructions can be added to it.  */

static void
btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
		    struct cleanup **ui_item_chain, int flags)
{
  print_source_lines_flags psl_flags;
  int line;

  psl_flags = 0;
  if (flags & DISASSEMBLY_FILENAME)
    psl_flags |= PRINT_SOURCE_LINES_FILENAME;

  for (line = lines.begin; line < lines.end; ++line)
    {
      if (*ui_item_chain != NULL)
	do_cleanups (*ui_item_chain);

      *ui_item_chain
	= make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");

      print_source_lines (lines.symtab, line, line + 1, psl_flags);

      make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
    }
}

/* Disassemble a section of the recorded instruction trace.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end,
		     gdb_disassembly_flags flags)
{
  struct cleanup *cleanups, *ui_item_chain;
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;
  struct btrace_line_range last_lines;

  DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
	 btrace_insn_number (begin), btrace_insn_number (end));

  flags |= DISASSEMBLY_SPECULATIVE;

  gdbarch = target_gdbarch ();
  last_lines = btrace_mk_line_range (NULL, 0, 0);

  cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");

  /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
     instructions corresponding to that line.  */
  ui_item_chain = NULL;

  gdb_pretty_print_disassembler disasm (gdbarch);

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  uiout->field_fmt ("insn-number", "%u",
			    btrace_insn_number (&it));
	  uiout->text ("\t");

	  btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
				      conf->format);
	}
      else
	{
	  struct disasm_insn dinsn;

	  if ((flags & DISASSEMBLY_SOURCE) != 0)
	    {
	      struct btrace_line_range lines;

	      lines = btrace_find_line_range (insn->pc);
	      if (!btrace_line_range_is_empty (lines)
		  && !btrace_line_range_contains_range (last_lines, lines))
		{
		  btrace_print_lines (lines, uiout, &ui_item_chain, flags);
		  last_lines = lines;
		}
	      else if (ui_item_chain == NULL)
		{
		  ui_item_chain
		    = make_cleanup_ui_out_tuple_begin_end (uiout,
							   "src_and_asm_line");
		  /* No source information.  */
		  make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
		}

	      gdb_assert (ui_item_chain != NULL);
	    }

	  memset (&dinsn, 0, sizeof (dinsn));
	  dinsn.number = btrace_insn_number (&it);
	  dinsn.addr = insn->pc;

	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    dinsn.is_speculative = 1;

	  disasm.pretty_print_insn (uiout, &dinsn, flags);
	}
    }

  do_cleanups (cleanups);
}

/* The to_insn_history method of target record-btrace.  */

static void
record_btrace_insn_history (struct target_ops *self, int size,
			    gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_range method of target record-btrace.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  gdb_disassembly_flags flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);
}

/* The to_insn_history_from method of target record-btrace.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 gdb_disassembly_flags flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
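
/* Worked example (illustrative): for from == 10 and size == -3 the code
   above selects the inclusive range [8; 10] (end = from, begin = from -
   context + 1); for from == 10 and size == 3 it selects [10; 12].  */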

/* Print the instruction number range for a function call history line.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = bfun->insn.size ();
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  uiout->text (",");
  ui_out_field_uint (uiout, "insn end", end);
}

/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct symtab *symtab;
  struct symbol *sym;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (const btrace_insn &insn : bfun->insn)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn.pc, 0);
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = std::min (begin, sal.line);
      end = std::max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}

/* Print the source line information for a function call history line.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  uiout->field_string ("file",
		       symtab_to_filename_for_display (symbol_symtab (sym)));

  btrace_compute_src_line_range (bfun, &begin, &end);
  if (end < begin)
    return;

  uiout->text (":");
  uiout->field_int ("min line", begin);

  if (end == begin)
    return;

  uiout->text (",");
  uiout->field_int ("max line", end);
}

/* Get the name of a branch trace function.  */

static const char *
btrace_get_bfun_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  if (bfun == NULL)
    return "??";

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);
  else if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);
  else
    return "??";
}

/* Disassemble a section of the recorded function trace.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     int int_flags)
{
  struct btrace_call_iterator it;
  record_print_flags flags = (enum record_print_flag) int_flags;

  DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      uiout->text ("\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    uiout->text ("  ");
	}

      if (sym != NULL)
	uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
      else if (!uiout->is_mi_like_p ())
	uiout->field_string ("function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  uiout->text (_("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  uiout->text (_("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      uiout->text ("\n");
    }
}

/* The to_call_history method of target record-btrace.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int context, covered;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", int_flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.btinfo = btinfo;
	  begin.index = replay->call_index;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", int_flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_range method of target record-btrace.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to,
				  int int_flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;
  record_print_flags flags = (enum record_print_flag) int_flags;

  uiout = current_uiout;
  ui_out_emit_tuple tuple_emitter (uiout, "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", int_flags, low, high);

  /* Check for wrap-arounds.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);
}

/* The to_call_history_from method of target record-btrace.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size,
				 int int_flags)
{
  ULONGEST begin, end, context;
  record_print_flags flags = (enum record_print_flag) int_flags;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}

/* The to_record_method method of target record-btrace.  */

static enum record_method
record_btrace_record_method (struct target_ops *self, ptid_t ptid)
{
  const struct btrace_config *config;
  struct thread_info * const tp = find_thread_ptid (ptid);

  if (tp == NULL)
    error (_("No thread."));

  if (tp->btrace.target == NULL)
    return RECORD_METHOD_NONE;

  return RECORD_METHOD_BTRACE;
}

/* The to_record_is_replaying method of target record-btrace.  */

static int
record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
      return 1;

  return 0;
}

/* The to_record_will_replay method of target record-btrace.  */

static int
record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
{
  return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
}

/* The to_xfer_partial method of target record-btrace.  */

static enum target_xfer_status
record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
			    const char *annex, gdb_byte *readbuf,
			    const gdb_byte *writebuf, ULONGEST offset,
			    ULONGEST len, ULONGEST *xfered_len)
{
  struct target_ops *t;

  /* Filter out requests that don't make sense during replay.  */
  if (replay_memory_access == replay_memory_access_read_only
      && !record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, inferior_ptid))
    {
      switch (object)
	{
	case TARGET_OBJECT_MEMORY:
	  {
	    struct target_section *section;

	    /* We do not allow writing memory in general.  */
	    if (writebuf != NULL)
	      {
		*xfered_len = len;
		return TARGET_XFER_UNAVAILABLE;
	      }

	    /* We allow reading readonly memory.  */
	    section = target_section_by_addr (ops, offset);
	    if (section != NULL)
	      {
		/* Check if the section we found is readonly.  */
		if ((bfd_get_section_flags (section->the_bfd_section->owner,
					    section->the_bfd_section)
		     & SEC_READONLY) != 0)
		  {
		    /* Truncate the request to fit into this section.  */
		    len = std::min (len, section->endaddr - offset);
		    break;
		  }
	      }

	    *xfered_len = len;
	    return TARGET_XFER_UNAVAILABLE;
	  }
	}
    }

  /* Forward the request.  */
  ops = ops->beneath;
  return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
			       offset, len, xfered_len);
}

/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt,
				 enum remove_bp_reason reason)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
						reason);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}

/* The to_fetch_registers method of target record-btrace.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (regcache_get_ptid (regcache));
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
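
/* Note (based on the code above): while replaying, only the PC is
   reconstructed from the recorded instruction; all other registers are left
   untouched, and record_btrace_frame_prev_register below likewise refuses
   to unwind anything but the PC.  */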

/* The to_store_registers method of target record-btrace.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    error (_("Cannot write registers while replaying."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}

/* The to_prepare_to_store method of target record-btrace.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile
      && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}

/* The branch trace frame cache.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;

/* hash_f for htab_create_alloc of bfcache.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache
    = (const struct btrace_frame_cache *) arg;

  return htab_hash_pointer (cache->frame);
}

/* eq_f for htab_create_alloc of bfcache.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1
    = (const struct btrace_frame_cache *) arg1;
  const struct btrace_frame_cache *cache2
    = (const struct btrace_frame_cache *) arg2;

  return cache1->frame == cache2->frame;
}
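
/* Illustrative sketch (the htab_create_alloc call itself lives in this
   file's initialization code, outside this excerpt):

     bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq,
				  NULL, xcalloc, xfree);

   Entries are keyed by the frame_info pointer, so both callbacks look only
   at CACHE->frame.  */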

/* Create a new btrace frame cache.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}

/* Extract the branch trace function from a branch trace frame.  */

static const struct btrace_function *
btrace_get_frame_function (struct frame_info *frame)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_frame_cache pattern;
  void **slot;

  pattern.frame = frame;

  slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
  if (slot == NULL)
    return NULL;

  cache = (const struct btrace_frame_cache *) *slot;
  return cache->bfun;
}

/* Implement stop_reason method for record_btrace_frame_unwind.  */

static enum unwind_stop_reason
record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
					void **this_cache)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (bfun->up == 0)
    return UNWIND_UNAVAILABLE;

  return UNWIND_NO_REASON;
}

/* Implement this_id method for record_btrace_frame_unwind.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  struct btrace_call_iterator it;
  CORE_ADDR code, special;

  cache = (const struct btrace_frame_cache *) *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

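  /* A btrace_function describes one segment of a function; BFUN->prev is
     assumed to link to the previous segment of the same function.  Walk back
     to the first segment so that all segments of one function instance
     yield the same frame id.  */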
  while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
    bfun = btrace_call_get (&it);

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}

/* Implement prev_register method for record_btrace_frame_unwind.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  struct btrace_call_iterator it;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = (const struct btrace_frame_cache *) *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  caller = btrace_call_get (&it);

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    pc = caller->insn.front ().pc;
  else
    {
      pc = caller->insn.back ().pc;
      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}

/* Implement sniffer method for record_btrace_frame_unwind.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = &replay->btinfo->functions[replay->call_index];
    }
  else
    {
      const struct btrace_function *callee;
      struct btrace_call_iterator it;

      callee = btrace_get_frame_function (next);
      if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
	return 0;

      if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
	return 0;

      bfun = btrace_call_get (&it);
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

/* Implement sniffer method for record_btrace_tailcall_frame_unwind.  */

static int
record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
				      struct frame_info *this_frame,
				      void **this_cache)
{
  const struct btrace_function *bfun, *callee;
  struct btrace_frame_cache *cache;
  struct btrace_call_iterator it;
  struct frame_info *next;
  struct thread_info *tinfo;

  next = get_next_frame (this_frame);
  if (next == NULL)
    return 0;

  callee = btrace_get_frame_function (next);
  if (callee == NULL)
    return 0;

  if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
    return 0;

  tinfo = find_thread_ptid (inferior_ptid);
  if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
    return 0;

  bfun = btrace_call_get (&it);

  DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tinfo;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}

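/* Implement the dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind; removes the frame's entry from the
   bfcache hash table.  */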
static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = (struct btrace_frame_cache *) this_cache;

  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}

/* btrace recording does not store previous memory content, nor the content
   of stack frames.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound registers
   as <unavailable>.  */
const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* Implement the to_get_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_unwinder (struct target_ops *self)
{
  return &record_btrace_frame_unwind;
}

/* Implement the to_get_tailcall_unwinder method.  */

static const struct frame_unwind *
record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
{
  return &record_btrace_tailcall_frame_unwind;
}

/* Return a human-readable string for FLAG.  */

static const char *
btrace_thread_flag_to_str (enum btrace_thread_flag flag)
{
  switch (flag)
    {
    case BTHR_STEP:
      return "step";

    case BTHR_RSTEP:
      return "reverse-step";

    case BTHR_CONT:
      return "cont";

    case BTHR_RCONT:
      return "reverse-cont";

    case BTHR_STOP:
      return "stop";
    }

  return "<invalid>";
}

/* Indicate that TP should be resumed according to FLAG.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
	 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));

  btinfo = &tp->btrace;

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  /* A resume request overwrites a preceding resume or stop request.  */
  btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
  btinfo->flags |= flag;
}

/* Get the current frame for TP.  */

static struct frame_info *
get_thread_current_frame (struct thread_info *tp)
{
  struct frame_info *frame;
  ptid_t old_inferior_ptid;
  int executing;

  /* Set INFERIOR_PTID, which is implicitly used by get_current_frame.  */
  old_inferior_ptid = inferior_ptid;
  inferior_ptid = tp->ptid;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (inferior_ptid);
  set_executing (inferior_ptid, 0);

  frame = NULL;
  TRY
    {
      frame = get_current_frame ();
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (inferior_ptid, executing);

      /* Restore the previous inferior_ptid.  */
      inferior_ptid = old_inferior_ptid;

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (inferior_ptid, executing);

  /* Restore the previous inferior_ptid.  */
  inferior_ptid = old_inferior_ptid;

  return frame;
}

/* Start replaying a thread.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->functions.empty ())
    return NULL;

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_thread_current_frame (tp);
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  return replay;
}
1989
1990 /* Stop replaying a thread. */
1991
1992 static void
1993 record_btrace_stop_replaying (struct thread_info *tp)
1994 {
1995 struct btrace_thread_info *btinfo;
1996
1997 btinfo = &tp->btrace;
1998
1999 xfree (btinfo->replay);
2000 btinfo->replay = NULL;
2001
2002 /* Make sure we're not leaving any stale registers. */
2003 registers_changed_ptid (tp->ptid);
2004 }
2005
2006 /* Stop replaying TP if it is at the end of its execution history. */
2007
2008 static void
2009 record_btrace_stop_replaying_at_end (struct thread_info *tp)
2010 {
2011 struct btrace_insn_iterator *replay, end;
2012 struct btrace_thread_info *btinfo;
2013
2014 btinfo = &tp->btrace;
2015 replay = btinfo->replay;
2016
2017 if (replay == NULL)
2018 return;
2019
2020 btrace_insn_end (&end, btinfo);
2021
2022 if (btrace_insn_cmp (replay, &end) == 0)
2023 record_btrace_stop_replaying (tp);
2024 }
2025
2026 /* The to_resume method of target record-btrace. */
2027
2028 static void
2029 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2030 enum gdb_signal signal)
2031 {
2032 struct thread_info *tp;
2033 enum btrace_thread_flag flag, cflag;
2034
2035 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2036 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2037 step ? "step" : "cont");
2038
2039 /* Store the execution direction of the last resume.
2040
2041 If there is more than one to_resume call, we have to rely on infrun
2042 to not change the execution direction in-between. */
2043 record_btrace_resume_exec_dir = execution_direction;
2044
2045 /* As long as we're not replaying, just forward the request.
2046
2047 For non-stop targets this means that no thread is replaying. In order to
2048 make progress, we may need to explicitly move replaying threads to the end
2049 of their execution history. */
2050 if ((execution_direction != EXEC_REVERSE)
2051 && !record_btrace_is_replaying (ops, minus_one_ptid))
2052 {
2053 ops = ops->beneath;
2054 ops->to_resume (ops, ptid, step, signal);
2055 return;
2056 }
2057
2058 /* Compute the btrace thread flag for the requested move. */
2059 if (execution_direction == EXEC_REVERSE)
2060 {
2061 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2062 cflag = BTHR_RCONT;
2063 }
2064 else
2065 {
2066 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2067 cflag = BTHR_CONT;
2068 }
2069
2070 /* We just indicate the resume intent here. The actual stepping happens in
2071 record_btrace_wait below.
2072
2073 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2074 if (!target_is_non_stop_p ())
2075 {
2076 gdb_assert (ptid_match (inferior_ptid, ptid));
2077
2078 ALL_NON_EXITED_THREADS (tp)
2079 if (ptid_match (tp->ptid, ptid))
2080 {
2081 if (ptid_match (tp->ptid, inferior_ptid))
2082 record_btrace_resume_thread (tp, flag);
2083 else
2084 record_btrace_resume_thread (tp, cflag);
2085 }
2086 }
2087 else
2088 {
2089 ALL_NON_EXITED_THREADS (tp)
2090 if (ptid_match (tp->ptid, ptid))
2091 record_btrace_resume_thread (tp, flag);
2092 }
2093
2094 /* Async support. */
2095 if (target_can_async_p ())
2096 {
2097 target_async (1);
2098 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2099 }
2100 }
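/* Editor's note: an illustrative summary of the flag selection above;
   not part of the original file.  The mapping is:

       direction   step == 0    step != 0
       forward     BTHR_CONT    BTHR_STEP
       reverse     BTHR_RCONT   BTHR_RSTEP

   In all-stop mode only INFERIOR_PTID receives the stepping flag;
   every other matching thread receives the continue variant (CFLAG).
   No thread actually moves here - the flags are consumed by
   record_btrace_wait below.  */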
2101
2102 /* The to_commit_resume method of target record-btrace. */
2103
2104 static void
2105 record_btrace_commit_resume (struct target_ops *ops)
2106 {
2107 if ((execution_direction != EXEC_REVERSE)
2108 && !record_btrace_is_replaying (ops, minus_one_ptid))
2109 ops->beneath->to_commit_resume (ops->beneath);
2110 }
2111
2112 /* Cancel resuming TP. */
2113
2114 static void
2115 record_btrace_cancel_resume (struct thread_info *tp)
2116 {
2117 enum btrace_thread_flag flags;
2118
2119 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2120 if (flags == 0)
2121 return;
2122
2123 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2124 print_thread_id (tp),
2125 target_pid_to_str (tp->ptid), flags,
2126 btrace_thread_flag_to_str (flags));
2127
2128 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
2129 record_btrace_stop_replaying_at_end (tp);
2130 }
2131
2132 /* Return a target_waitstatus indicating that we ran out of history. */
2133
2134 static struct target_waitstatus
2135 btrace_step_no_history (void)
2136 {
2137 struct target_waitstatus status;
2138
2139 status.kind = TARGET_WAITKIND_NO_HISTORY;
2140
2141 return status;
2142 }
2143
2144 /* Return a target_waitstatus indicating that a step finished. */
2145
2146 static struct target_waitstatus
2147 btrace_step_stopped (void)
2148 {
2149 struct target_waitstatus status;
2150
2151 status.kind = TARGET_WAITKIND_STOPPED;
2152 status.value.sig = GDB_SIGNAL_TRAP;
2153
2154 return status;
2155 }
2156
2157 /* Return a target_waitstatus indicating that a thread was stopped as
2158 requested. */
2159
2160 static struct target_waitstatus
2161 btrace_step_stopped_on_request (void)
2162 {
2163 struct target_waitstatus status;
2164
2165 status.kind = TARGET_WAITKIND_STOPPED;
2166 status.value.sig = GDB_SIGNAL_0;
2167
2168 return status;
2169 }
2170
2171 /* Return a target_waitstatus indicating a spurious stop. */
2172
2173 static struct target_waitstatus
2174 btrace_step_spurious (void)
2175 {
2176 struct target_waitstatus status;
2177
2178 status.kind = TARGET_WAITKIND_SPURIOUS;
2179
2180 return status;
2181 }
2182
2183 /* Return a target_waitstatus indicating that the thread was not resumed. */
2184
2185 static struct target_waitstatus
2186 btrace_step_no_resumed (void)
2187 {
2188 struct target_waitstatus status;
2189
2190 status.kind = TARGET_WAITKIND_NO_RESUMED;
2191
2192 return status;
2193 }
2194
2195 /* Return a target_waitstatus indicating that we should wait again. */
2196
2197 static struct target_waitstatus
2198 btrace_step_again (void)
2199 {
2200 struct target_waitstatus status;
2201
2202 status.kind = TARGET_WAITKIND_IGNORE;
2203
2204 return status;
2205 }
2206
2207 /* Clear the record histories. */
2208
2209 static void
2210 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2211 {
2212 xfree (btinfo->insn_history);
2213 xfree (btinfo->call_history);
2214
2215 btinfo->insn_history = NULL;
2216 btinfo->call_history = NULL;
2217 }
2218
2219 /* Check whether TP's current replay position is at a breakpoint. */
2220
2221 static int
2222 record_btrace_replay_at_breakpoint (struct thread_info *tp)
2223 {
2224 struct btrace_insn_iterator *replay;
2225 struct btrace_thread_info *btinfo;
2226 const struct btrace_insn *insn;
2227 struct inferior *inf;
2228
2229 btinfo = &tp->btrace;
2230 replay = btinfo->replay;
2231
2232 if (replay == NULL)
2233 return 0;
2234
2235 insn = btrace_insn_get (replay);
2236 if (insn == NULL)
2237 return 0;
2238
2239 inf = find_inferior_ptid (tp->ptid);
2240 if (inf == NULL)
2241 return 0;
2242
2243 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2244 &btinfo->stop_reason);
2245 }
2246
2247 /* Step one instruction in the forward direction. */
2248
2249 static struct target_waitstatus
2250 record_btrace_single_step_forward (struct thread_info *tp)
2251 {
2252 struct btrace_insn_iterator *replay, end, start;
2253 struct btrace_thread_info *btinfo;
2254
2255 btinfo = &tp->btrace;
2256 replay = btinfo->replay;
2257
2258 /* We're done if we're not replaying. */
2259 if (replay == NULL)
2260 return btrace_step_no_history ();
2261
2262 /* Check if we're stepping a breakpoint. */
2263 if (record_btrace_replay_at_breakpoint (tp))
2264 return btrace_step_stopped ();
2265
2266 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2267 jump back to the instruction at which we started. */
2268 start = *replay;
2269 do
2270 {
2271 unsigned int steps;
2272
2273 /* We will bail out here if we continue stepping after reaching the end
2274 of the execution history. */
2275 steps = btrace_insn_next (replay, 1);
2276 if (steps == 0)
2277 {
2278 *replay = start;
2279 return btrace_step_no_history ();
2280 }
2281 }
2282 while (btrace_insn_get (replay) == NULL);
2283
2284 /* Determine the end of the instruction trace. */
2285 btrace_insn_end (&end, btinfo);
2286
2287 /* The execution trace contains (and ends with) the current instruction.
2288 This instruction has not been executed yet, so the trace really ends
2289 one instruction earlier. */
2290 if (btrace_insn_cmp (replay, &end) == 0)
2291 return btrace_step_no_history ();
2292
2293 return btrace_step_spurious ();
2294 }
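/* Editor's note: a worked example for the end-of-trace rule above;
   illustrative only, not part of the original file.  If the recorded
   trace is [i1, ..., iN], then iN is the current, not-yet-executed
   instruction.  Replaying at i(N-1) and stepping forward advances the
   iterator to iN, where it compares equal to btrace_insn_end, so the
   step reports TARGET_WAITKIND_NO_HISTORY rather than pretending that
   iN was executed.  */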
2295
2296 /* Step one instruction in the backward direction. */
2297
2298 static struct target_waitstatus
2299 record_btrace_single_step_backward (struct thread_info *tp)
2300 {
2301 struct btrace_insn_iterator *replay, start;
2302 struct btrace_thread_info *btinfo;
2303
2304 btinfo = &tp->btrace;
2305 replay = btinfo->replay;
2306
2307 /* Start replaying if we're not already doing so. */
2308 if (replay == NULL)
2309 replay = record_btrace_start_replaying (tp);
2310
2311 /* If we can't step any further, we have reached the end of the history.
2312 Skip gaps during replay. If we end up at a gap (at the beginning of
2313 the trace), jump back to the instruction at which we started. */
2314 start = *replay;
2315 do
2316 {
2317 unsigned int steps;
2318
2319 steps = btrace_insn_prev (replay, 1);
2320 if (steps == 0)
2321 {
2322 *replay = start;
2323 return btrace_step_no_history ();
2324 }
2325 }
2326 while (btrace_insn_get (replay) == NULL);
2327
2328 /* Check if we're stepping a breakpoint.
2329
2330 For reverse-stepping, this check is after the step. There is logic in
2331 infrun.c that handles reverse-stepping separately. See, for example,
2332 proceed and adjust_pc_after_break.
2333
2334 This code assumes that for reverse-stepping, PC points to the last
2335 de-executed instruction, whereas for forward-stepping PC points to the
2336 next to-be-executed instruction. */
2337 if (record_btrace_replay_at_breakpoint (tp))
2338 return btrace_step_stopped ();
2339
2340 return btrace_step_spurious ();
2341 }
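/* Editor's note: illustrative only, not part of the original file.
   The PC convention above in a concrete case: with a breakpoint at
   address A, forward replay stops with PC == A before A executes;
   reverse replay first de-executes one instruction and only then
   checks for breakpoints, so PC == A now means A is the last
   de-executed instruction.  This is why the breakpoint check follows
   the stepping loop here but precedes it in the forward variant.  */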
2342
2343 /* Step a single thread. */
2344
2345 static struct target_waitstatus
2346 record_btrace_step_thread (struct thread_info *tp)
2347 {
2348 struct btrace_thread_info *btinfo;
2349 struct target_waitstatus status;
2350 enum btrace_thread_flag flags;
2351
2352 btinfo = &tp->btrace;
2353
2354 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2355 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
2356
2357 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
2358 target_pid_to_str (tp->ptid), flags,
2359 btrace_thread_flag_to_str (flags));
2360
2361 /* We can't step without an execution history. */
2362 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2363 return btrace_step_no_history ();
2364
2365 switch (flags)
2366 {
2367 default:
2368 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2369
2370 case BTHR_STOP:
2371 return btrace_step_stopped_on_request ();
2372
2373 case BTHR_STEP:
2374 status = record_btrace_single_step_forward (tp);
2375 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2376 break;
2377
2378 return btrace_step_stopped ();
2379
2380 case BTHR_RSTEP:
2381 status = record_btrace_single_step_backward (tp);
2382 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2383 break;
2384
2385 return btrace_step_stopped ();
2386
2387 case BTHR_CONT:
2388 status = record_btrace_single_step_forward (tp);
2389 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2390 break;
2391
2392 btinfo->flags |= flags;
2393 return btrace_step_again ();
2394
2395 case BTHR_RCONT:
2396 status = record_btrace_single_step_backward (tp);
2397 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2398 break;
2399
2400 btinfo->flags |= flags;
2401 return btrace_step_again ();
2402 }
2403
2404 /* We keep threads moving at the end of their execution history. The to_wait
2405 method will stop the thread for which the event is reported. */
2406 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2407 btinfo->flags |= flags;
2408
2409 return status;
2410 }
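/* Editor's note: a hedged sketch (kept out of the build) of how the
   BTHR_CONT/BTHR_RCONT cases above interact with a caller; the loop
   below is illustrative, not part of the original file.  TP stands
   for some resumed thread.  */
#if 0
/* Continuing threads are moved one instruction per call.  Each
   spurious step re-arms the thread (btinfo->flags |= flags) and
   reports TARGET_WAITKIND_IGNORE, so a caller simply loops until a
   reportable event shows up.  */
for (;;)
  {
    struct target_waitstatus ws = record_btrace_step_thread (tp);

    if (ws.kind != TARGET_WAITKIND_IGNORE)
      break;	/* Stopped, no-history, etc.  */
  }
#endif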
2411
2412 /* A vector of threads. */
2413
2414 typedef struct thread_info * tp_t;
2415 DEF_VEC_P (tp_t);
2416
2417 /* Announce further events if necessary. */
2418
2419 static void
2420 record_btrace_maybe_mark_async_event (const VEC (tp_t) *moving,
2421 const VEC (tp_t) *no_history)
2422 {
2423 int more_moving, more_no_history;
2424
2425 more_moving = !VEC_empty (tp_t, moving);
2426 more_no_history = !VEC_empty (tp_t, no_history);
2427
2428 if (!more_moving && !more_no_history)
2429 return;
2430
2431 if (more_moving)
2432 DEBUG ("movers pending");
2433
2434 if (more_no_history)
2435 DEBUG ("no-history pending");
2436
2437 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2438 }
2439
2440 /* The to_wait method of target record-btrace. */
2441
2442 static ptid_t
2443 record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2444 struct target_waitstatus *status, int options)
2445 {
2446 VEC (tp_t) *moving, *no_history;
2447 struct thread_info *tp, *eventing;
2448 struct cleanup *cleanups = make_cleanup (null_cleanup, NULL);
2449
2450 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2451
2452 /* As long as we're not replaying, just forward the request. */
2453 if ((execution_direction != EXEC_REVERSE)
2454 && !record_btrace_is_replaying (ops, minus_one_ptid))
2455 {
2456 ops = ops->beneath;
2457 return ops->to_wait (ops, ptid, status, options);
2458 }
2459
2460 moving = NULL;
2461 no_history = NULL;
2462
2463 make_cleanup (VEC_cleanup (tp_t), &moving);
2464 make_cleanup (VEC_cleanup (tp_t), &no_history);
2465
2466 /* Keep a work list of moving threads. */
2467 ALL_NON_EXITED_THREADS (tp)
2468 if (ptid_match (tp->ptid, ptid)
2469 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2470 VEC_safe_push (tp_t, moving, tp);
2471
2472 if (VEC_empty (tp_t, moving))
2473 {
2474 *status = btrace_step_no_resumed ();
2475
2476 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
2477 target_waitstatus_to_string (status).c_str ());
2478
2479 do_cleanups (cleanups);
2480 return null_ptid;
2481 }
2482
2483 /* Step moving threads one by one, one step each, until either one thread
2484 reports an event or we run out of threads to step.
2485
2486 When stepping more than one thread, chances are that some threads reach
2487 the end of their execution history earlier than others. If we reported
2488 this immediately, all-stop on top of non-stop would stop all threads and
2489 resume the same threads next time. And we would report the same thread
2490 having reached the end of its execution history again.
2491
2492 In the worst case, this would starve the other threads. But even if other
2493 threads would be allowed to make progress, this would result in far too
2494 many intermediate stops.
2495
2496 We therefore delay the reporting of "no execution history" until we have
2497 nothing else to report. By this time, all threads should have moved to
2498 either the beginning or the end of their execution history. There will
2499 be a single user-visible stop. */
2500 eventing = NULL;
2501 while ((eventing == NULL) && !VEC_empty (tp_t, moving))
2502 {
2503 unsigned int ix;
2504
2505 ix = 0;
2506 while ((eventing == NULL) && VEC_iterate (tp_t, moving, ix, tp))
2507 {
2508 *status = record_btrace_step_thread (tp);
2509
2510 switch (status->kind)
2511 {
2512 case TARGET_WAITKIND_IGNORE:
2513 ix++;
2514 break;
2515
2516 case TARGET_WAITKIND_NO_HISTORY:
2517 VEC_safe_push (tp_t, no_history,
2518 VEC_ordered_remove (tp_t, moving, ix));
2519 break;
2520
2521 default:
2522 eventing = VEC_unordered_remove (tp_t, moving, ix);
2523 break;
2524 }
2525 }
2526 }
2527
2528 if (eventing == NULL)
2529 {
2530 /* We started with at least one moving thread. This thread must have
2531 either stopped or reached the end of its execution history.
2532
2533 In the former case, EVENTING must not be NULL.
2534 In the latter case, NO_HISTORY must not be empty. */
2535 gdb_assert (!VEC_empty (tp_t, no_history));
2536
2537 /* We kept threads moving at the end of their execution history. Stop
2538 EVENTING now that we are going to report its stop. */
2539 eventing = VEC_unordered_remove (tp_t, no_history, 0);
2540 eventing->btrace.flags &= ~BTHR_MOVE;
2541
2542 *status = btrace_step_no_history ();
2543 }
2544
2545 gdb_assert (eventing != NULL);
2546
2547 /* We kept threads replaying at the end of their execution history. Stop
2548 replaying EVENTING now that we are going to report its stop. */
2549 record_btrace_stop_replaying_at_end (eventing);
2550
2551 /* Stop all other threads. */
2552 if (!target_is_non_stop_p ())
2553 ALL_NON_EXITED_THREADS (tp)
2554 record_btrace_cancel_resume (tp);
2555
2556 /* In async mode, we need to announce further events. */
2557 if (target_is_async_p ())
2558 record_btrace_maybe_mark_async_event (moving, no_history);
2559
2560 /* Start record histories anew from the current position. */
2561 record_btrace_clear_histories (&eventing->btrace);
2562
2563 /* We moved the replay position but did not update registers. */
2564 registers_changed_ptid (eventing->ptid);
2565
2566 DEBUG ("wait ended by thread %s (%s): %s",
2567 print_thread_id (eventing),
2568 target_pid_to_str (eventing->ptid),
2569 target_waitstatus_to_string (status).c_str ());
2570
2571 do_cleanups (cleanups);
2572 return eventing->ptid;
2573 }
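/* Editor's note: a worked example of the deferred no-history
   reporting implemented above; illustrative, not part of the original
   file.  Assume threads T1 and T2 are both reverse-continuing, with
   10 and 1000 replayable instructions respectively.  The round-robin
   loop steps both; after 10 rounds T1 runs out and is parked on the
   NO_HISTORY list while T2 keeps moving.  Only once MOVING drains
   (T2 also runs out, or stops for a breakpoint) is a single event
   reported, so the user sees one stop instead of a series of
   intermediate "no more history" stops.  */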
2574
2575 /* The to_stop method of target record-btrace. */
2576
2577 static void
2578 record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2579 {
2580 DEBUG ("stop %s", target_pid_to_str (ptid));
2581
2582 /* As long as we're not replaying, just forward the request. */
2583 if ((execution_direction != EXEC_REVERSE)
2584 && !record_btrace_is_replaying (ops, minus_one_ptid))
2585 {
2586 ops = ops->beneath;
2587 ops->to_stop (ops, ptid);
2588 }
2589 else
2590 {
2591 struct thread_info *tp;
2592
2593 ALL_NON_EXITED_THREADS (tp)
2594 if (ptid_match (tp->ptid, ptid))
2595 {
2596 tp->btrace.flags &= ~BTHR_MOVE;
2597 tp->btrace.flags |= BTHR_STOP;
2598 }
2599 }
2600 }
2601
2602 /* The to_can_execute_reverse method of target record-btrace. */
2603
2604 static int
2605 record_btrace_can_execute_reverse (struct target_ops *self)
2606 {
2607 return 1;
2608 }
2609
2610 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2611
2612 static int
2613 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2614 {
2615 if (record_btrace_is_replaying (ops, minus_one_ptid))
2616 {
2617 struct thread_info *tp = inferior_thread ();
2618
2619 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2620 }
2621
2622 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2623 }
2624
2625 /* The to_supports_stopped_by_sw_breakpoint method of target
2626 record-btrace. */
2627
2628 static int
2629 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2630 {
2631 if (record_btrace_is_replaying (ops, minus_one_ptid))
2632 return 1;
2633
2634 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2635 }
2636
2637 /* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2638
2639 static int
2640 record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2641 {
2642 if (record_btrace_is_replaying (ops, minus_one_ptid))
2643 {
2644 struct thread_info *tp = inferior_thread ();
2645
2646 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2647 }
2648
2649 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2650 }
2651
2652 /* The to_supports_stopped_by_hw_breakpoint method of target
2653 record-btrace. */
2654
2655 static int
2656 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2657 {
2658 if (record_btrace_is_replaying (ops, minus_one_ptid))
2659 return 1;
2660
2661 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2662 }
2663
2664 /* The to_update_thread_list method of target record-btrace. */
2665
2666 static void
2667 record_btrace_update_thread_list (struct target_ops *ops)
2668 {
2669 /* We don't add or remove threads during replay. */
2670 if (record_btrace_is_replaying (ops, minus_one_ptid))
2671 return;
2672
2673 /* Forward the request. */
2674 ops = ops->beneath;
2675 ops->to_update_thread_list (ops);
2676 }
2677
2678 /* The to_thread_alive method of target record-btrace. */
2679
2680 static int
2681 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2682 {
2683 /* We don't add or remove threads during replay. */
2684 if (record_btrace_is_replaying (ops, minus_one_ptid))
2685 return find_thread_ptid (ptid) != NULL;
2686
2687 /* Forward the request. */
2688 ops = ops->beneath;
2689 return ops->to_thread_alive (ops, ptid);
2690 }
2691
2692 /* Set the replay branch trace instruction iterator. If IT is NULL, replay
2693 is stopped. */
2694
2695 static void
2696 record_btrace_set_replay (struct thread_info *tp,
2697 const struct btrace_insn_iterator *it)
2698 {
2699 struct btrace_thread_info *btinfo;
2700
2701 btinfo = &tp->btrace;
2702
2703 if (it == NULL)
2704 record_btrace_stop_replaying (tp);
2705 else
2706 {
2707 if (btinfo->replay == NULL)
2708 record_btrace_start_replaying (tp);
2709 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2710 return;
2711
2712 *btinfo->replay = *it;
2713 registers_changed_ptid (tp->ptid);
2714 }
2715
2716 /* Start anew from the new replay position. */
2717 record_btrace_clear_histories (btinfo);
2718
2719 stop_pc = regcache_read_pc (get_current_regcache ());
2720 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
2721 }
2722
2723 /* The to_goto_record_begin method of target record-btrace. */
2724
2725 static void
2726 record_btrace_goto_begin (struct target_ops *self)
2727 {
2728 struct thread_info *tp;
2729 struct btrace_insn_iterator begin;
2730
2731 tp = require_btrace_thread ();
2732
2733 btrace_insn_begin (&begin, &tp->btrace);
2734
2735 /* Skip gaps at the beginning of the trace. */
2736 while (btrace_insn_get (&begin) == NULL)
2737 {
2738 unsigned int steps;
2739
2740 steps = btrace_insn_next (&begin, 1);
2741 if (steps == 0)
2742 error (_("No trace."));
2743 }
2744
2745 record_btrace_set_replay (tp, &begin);
2746 }
2747
2748 /* The to_goto_record_end method of target record-btrace. */
2749
2750 static void
2751 record_btrace_goto_end (struct target_ops *ops)
2752 {
2753 struct thread_info *tp;
2754
2755 tp = require_btrace_thread ();
2756
2757 record_btrace_set_replay (tp, NULL);
2758 }
2759
2760 /* The to_goto_record method of target record-btrace. */
2761
2762 static void
2763 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2764 {
2765 struct thread_info *tp;
2766 struct btrace_insn_iterator it;
2767 unsigned int number;
2768 int found;
2769
2770 number = insn;
2771
2772 /* Check for wrap-arounds when narrowing ULONGEST to unsigned int. */
2773 if (number != insn)
2774 error (_("Instruction number out of range."));
2775
2776 tp = require_btrace_thread ();
2777
2778 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2779
2780 /* Check if the instruction could not be found or is a gap. */
2781 if (found == 0 || btrace_insn_get (&it) == NULL)
2782 error (_("No such instruction."));
2783
2784 record_btrace_set_replay (tp, &it);
2785 }
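/* Editor's note: a hedged illustration of the wrap-around check in
   record_btrace_goto above; kept out of the build, not part of the
   original file.  On hosts where unsigned int is 32 bits wide:  */
#if 0
ULONGEST insn = 0x100000000ULL;	/* Bit 32 set; does not fit.  */
unsigned int number = insn;	/* Truncates to 0.  */
gdb_assert (number != insn);	/* Hence "out of range" is reported.  */
#endif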
2786
2787 /* The to_record_stop_replaying method of target record-btrace. */
2788
2789 static void
2790 record_btrace_stop_replaying_all (struct target_ops *self)
2791 {
2792 struct thread_info *tp;
2793
2794 ALL_NON_EXITED_THREADS (tp)
2795 record_btrace_stop_replaying (tp);
2796 }
2797
2798 /* The to_execution_direction target method. */
2799
2800 static enum exec_direction_kind
2801 record_btrace_execution_direction (struct target_ops *self)
2802 {
2803 return record_btrace_resume_exec_dir;
2804 }
2805
2806 /* The to_prepare_to_generate_core target method. */
2807
2808 static void
2809 record_btrace_prepare_to_generate_core (struct target_ops *self)
2810 {
2811 record_btrace_generating_corefile = 1;
2812 }
2813
2814 /* The to_done_generating_core target method. */
2815
2816 static void
2817 record_btrace_done_generating_core (struct target_ops *self)
2818 {
2819 record_btrace_generating_corefile = 0;
2820 }
2821
2822 /* Initialize the record-btrace target ops. */
2823
2824 static void
2825 init_record_btrace_ops (void)
2826 {
2827 struct target_ops *ops;
2828
2829 ops = &record_btrace_ops;
2830 ops->to_shortname = "record-btrace";
2831 ops->to_longname = "Branch tracing target";
2832 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2833 ops->to_open = record_btrace_open;
2834 ops->to_close = record_btrace_close;
2835 ops->to_async = record_btrace_async;
2836 ops->to_detach = record_detach;
2837 ops->to_disconnect = record_btrace_disconnect;
2838 ops->to_mourn_inferior = record_mourn_inferior;
2839 ops->to_kill = record_kill;
2840 ops->to_stop_recording = record_btrace_stop_recording;
2841 ops->to_info_record = record_btrace_info;
2842 ops->to_insn_history = record_btrace_insn_history;
2843 ops->to_insn_history_from = record_btrace_insn_history_from;
2844 ops->to_insn_history_range = record_btrace_insn_history_range;
2845 ops->to_call_history = record_btrace_call_history;
2846 ops->to_call_history_from = record_btrace_call_history_from;
2847 ops->to_call_history_range = record_btrace_call_history_range;
2848 ops->to_record_method = record_btrace_record_method;
2849 ops->to_record_is_replaying = record_btrace_is_replaying;
2850 ops->to_record_will_replay = record_btrace_will_replay;
2851 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
2852 ops->to_xfer_partial = record_btrace_xfer_partial;
2853 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2854 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
2855 ops->to_fetch_registers = record_btrace_fetch_registers;
2856 ops->to_store_registers = record_btrace_store_registers;
2857 ops->to_prepare_to_store = record_btrace_prepare_to_store;
2858 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2859 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
2860 ops->to_resume = record_btrace_resume;
2861 ops->to_commit_resume = record_btrace_commit_resume;
2862 ops->to_wait = record_btrace_wait;
2863 ops->to_stop = record_btrace_stop;
2864 ops->to_update_thread_list = record_btrace_update_thread_list;
2865 ops->to_thread_alive = record_btrace_thread_alive;
2866 ops->to_goto_record_begin = record_btrace_goto_begin;
2867 ops->to_goto_record_end = record_btrace_goto_end;
2868 ops->to_goto_record = record_btrace_goto;
2869 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
2870 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2871 ops->to_supports_stopped_by_sw_breakpoint
2872 = record_btrace_supports_stopped_by_sw_breakpoint;
2873 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2874 ops->to_supports_stopped_by_hw_breakpoint
2875 = record_btrace_supports_stopped_by_hw_breakpoint;
2876 ops->to_execution_direction = record_btrace_execution_direction;
2877 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2878 ops->to_done_generating_core = record_btrace_done_generating_core;
2879 ops->to_stratum = record_stratum;
2880 ops->to_magic = OPS_MAGIC;
2881 }
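/* Editor's note: a hedged sketch of the delegation pattern shared by
   the methods installed above; illustrative, not part of the original
   file.  EXAMPLE_METHOD and to_example are hypothetical names.  */
#if 0
static void
example_method (struct target_ops *ops)
{
  /* record-btrace sits at record_stratum; while not replaying, it is
     transparent and forwards requests to the target beneath.  */
  if (!record_btrace_is_replaying (ops, minus_one_ptid))
    {
      ops = ops->beneath;
      ops->to_example (ops);	/* Hypothetical method.  */
      return;
    }

  /* Replay-specific behavior goes here.  */
}
#endif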
2882
2883 /* Start recording in BTS format. */
2884
2885 static void
2886 cmd_record_btrace_bts_start (const char *args, int from_tty)
2887 {
2888 if (args != NULL && *args != 0)
2889 error (_("Invalid argument."));
2890
2891 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2892
2893 TRY
2894 {
2895 execute_command ((char *) "target record-btrace", from_tty);
2896 }
2897 CATCH (exception, RETURN_MASK_ALL)
2898 {
2899 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2900 throw_exception (exception);
2901 }
2902 END_CATCH
2903 }
2904
2905 /* Start recording in Intel Processor Trace format. */
2906
2907 static void
2908 cmd_record_btrace_pt_start (const char *args, int from_tty)
2909 {
2910 if (args != NULL && *args != 0)
2911 error (_("Invalid argument."));
2912
2913 record_btrace_conf.format = BTRACE_FORMAT_PT;
2914
2915 TRY
2916 {
2917 execute_command ((char *) "target record-btrace", from_tty);
2918 }
2919 CATCH (exception, RETURN_MASK_ALL)
2920 {
2921 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2922 throw_exception (exception);
2923 }
2924 END_CATCH
2925 }
2926
2927 /* The "record btrace" command: try Intel PT first, fall back to BTS. */
2928
2929 static void
2930 cmd_record_btrace_start (char *args, int from_tty)
2931 {
2932 if (args != NULL && *args != 0)
2933 error (_("Invalid argument."));
2934
2935 record_btrace_conf.format = BTRACE_FORMAT_PT;
2936
2937 TRY
2938 {
2939 execute_command ((char *) "target record-btrace", from_tty);
2940 }
2941 CATCH (exception, RETURN_MASK_ALL)
2942 {
2943 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2944
2945 TRY
2946 {
2947 execute_command ((char *) "target record-btrace", from_tty);
2948 }
2949 CATCH (exception, RETURN_MASK_ALL)
2950 {
2951 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2952 throw_exception (exception);
2953 }
2954 END_CATCH
2955 }
2956 END_CATCH
2957 }
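/* Editor's note (illustrative, not part of the original file): the
   nested TRY/CATCH above makes a plain "record btrace" prefer Intel
   PT and silently fall back to BTS, e.g.:

     (gdb) record btrace        <- tries PT first, then BTS
     (gdb) record btrace pt     <- PT only; fails if unsupported
     (gdb) record btrace bts    <- BTS only

   Only if the BTS attempt also fails is its exception propagated.  */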
2958
2959 /* The "set record btrace" command. */
2960
2961 static void
2962 cmd_set_record_btrace (char *args, int from_tty)
2963 {
2964 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2965 }
2966
2967 /* The "show record btrace" command. */
2968
2969 static void
2970 cmd_show_record_btrace (char *args, int from_tty)
2971 {
2972 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2973 }
2974
2975 /* The "show record btrace replay-memory-access" command. */
2976
2977 static void
2978 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2979 struct cmd_list_element *c, const char *value)
2980 {
2981 fprintf_filtered (file, _("Replay memory access is %s.\n"),
2982 replay_memory_access);
2983 }
2984
2985 /* The "set record btrace bts" command. */
2986
2987 static void
2988 cmd_set_record_btrace_bts (char *args, int from_tty)
2989 {
2990 printf_unfiltered (_("\"set record btrace bts\" must be followed "
2991 "by an appropriate subcommand.\n"));
2992 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
2993 all_commands, gdb_stdout);
2994 }
2995
2996 /* The "show record btrace bts" command. */
2997
2998 static void
2999 cmd_show_record_btrace_bts (char *args, int from_tty)
3000 {
3001 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3002 }
3003
3004 /* The "set record btrace pt" command. */
3005
3006 static void
3007 cmd_set_record_btrace_pt (char *args, int from_tty)
3008 {
3009 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3010 "by an appropriate subcommand.\n"));
3011 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3012 all_commands, gdb_stdout);
3013 }
3014
3015 /* The "show record btrace pt" command. */
3016
3017 static void
3018 cmd_show_record_btrace_pt (char *args, int from_tty)
3019 {
3020 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3021 }
3022
3023 /* The "record bts buffer-size" show value function. */
3024
3025 static void
3026 show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3027 struct cmd_list_element *c,
3028 const char *value)
3029 {
3030 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3031 value);
3032 }
3033
3034 /* The "record pt buffer-size" show value function. */
3035
3036 static void
3037 show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3038 struct cmd_list_element *c,
3039 const char *value)
3040 {
3041 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3042 value);
3043 }
3044
3045 /* Initialize btrace commands. */
3046
3047 void
3048 _initialize_record_btrace (void)
3049 {
3050 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3051 _("Start branch trace recording."), &record_btrace_cmdlist,
3052 "record btrace ", 0, &record_cmdlist);
3053 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3054
3055 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3056 _("\
3057 Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3058 The processor stores a from/to record for each branch into a cyclic buffer.\n\
3059 This format may not be available on all processors."),
3060 &record_btrace_cmdlist);
3061 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3062
3063 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3064 _("\
3065 Start branch trace recording in Intel Processor Trace format.\n\n\
3066 This format may not be available on all processors."),
3067 &record_btrace_cmdlist);
3068 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3069
3070 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3071 _("Set record options"), &set_record_btrace_cmdlist,
3072 "set record btrace ", 0, &set_record_cmdlist);
3073
3074 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3075 _("Show record options"), &show_record_btrace_cmdlist,
3076 "show record btrace ", 0, &show_record_cmdlist);
3077
3078 add_setshow_enum_cmd ("replay-memory-access", no_class,
3079 replay_memory_access_types, &replay_memory_access, _("\
3080 Set what memory accesses are allowed during replay."), _("\
3081 Show what memory accesses are allowed during replay."),
3082 _("Default is READ-ONLY.\n\n\
3083 The btrace record target does not trace data.\n\
3084 The memory therefore corresponds to the live target and not \
3085 to the current replay position.\n\n\
3086 When READ-ONLY, allow accesses to read-only memory during replay.\n\
3087 When READ-WRITE, allow accesses to read-only and read-write memory during \
3088 replay."),
3089 NULL, cmd_show_replay_memory_access,
3090 &set_record_btrace_cmdlist,
3091 &show_record_btrace_cmdlist);
3092
3093 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3094 _("Set record btrace bts options"),
3095 &set_record_btrace_bts_cmdlist,
3096 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3097
3098 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3099 _("Show record btrace bts options"),
3100 &show_record_btrace_bts_cmdlist,
3101 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3102
3103 add_setshow_uinteger_cmd ("buffer-size", no_class,
3104 &record_btrace_conf.bts.size,
3105 _("Set the record/replay bts buffer size."),
3106 _("Show the record/replay bts buffer size."), _("\
3107 When starting recording, request a trace buffer of this size. \
3108 The actual buffer size may differ from the requested size. \
3109 Use \"info record\" to see the actual buffer size.\n\n\
3110 Bigger buffers allow longer recording but also take more time to process \
3111 the recorded execution trace.\n\n\
3112 The trace buffer size may not be changed while recording."), NULL,
3113 show_record_bts_buffer_size_value,
3114 &set_record_btrace_bts_cmdlist,
3115 &show_record_btrace_bts_cmdlist);
3116
3117 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3118 _("Set record btrace pt options"),
3119 &set_record_btrace_pt_cmdlist,
3120 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3121
3122 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3123 _("Show record btrace pt options"),
3124 &show_record_btrace_pt_cmdlist,
3125 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3126
3127 add_setshow_uinteger_cmd ("buffer-size", no_class,
3128 &record_btrace_conf.pt.size,
3129 _("Set the record/replay pt buffer size."),
3130 _("Show the record/replay pt buffer size."), _("\
3131 Bigger buffers allow longer recording but also take more time to process \
3132 the recorded execution trace.\n\
3133 The actual buffer size may differ from the requested size. Use \"info record\" \
3134 to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3135 &set_record_btrace_pt_cmdlist,
3136 &show_record_btrace_pt_cmdlist);
3137
3138 init_record_btrace_ops ();
3139 add_target (&record_btrace_ops);
3140
3141 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3142 xcalloc, xfree);
3143
3144 record_btrace_conf.bts.size = 64 * 1024;
3145 record_btrace_conf.pt.size = 16 * 1024;
3146 }
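/* Editor's note: an illustrative session exercising the commands
   registered above; a hedged example, not part of the original file.

     (gdb) set record btrace bts buffer-size 131072
     (gdb) record btrace bts
     (gdb) info record
     (gdb) set record btrace replay-memory-access read-write
     (gdb) record goto begin

   Buffer sizes take effect when recording starts and, per the help
   text above, may not be changed while recording.  */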