]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blob - gdb/record-btrace.c
Replace some xmalloc-family functions with XNEW-family ones
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
1 /* Branch trace support for GDB, the GNU debugger.
2
3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22 #include "defs.h"
23 #include "record.h"
24 #include "gdbthread.h"
25 #include "target.h"
26 #include "gdbcmd.h"
27 #include "disasm.h"
28 #include "observer.h"
29 #include "cli/cli-utils.h"
30 #include "source.h"
31 #include "ui-out.h"
32 #include "symtab.h"
33 #include "filenames.h"
34 #include "regcache.h"
35 #include "frame-unwind.h"
36 #include "hashtab.h"
37 #include "infrun.h"
38 #include "event-loop.h"
39 #include "inf-loop.h"
40
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Defaults to read-only;
   see record_btrace_xfer_partial for how this gates memory writes.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  While
   set, replay-mode restrictions on memory and register access are
   bypassed.  */
static int record_btrace_generating_corefile;

/* The current branch trace configuration.  */
static struct btrace_config record_btrace_conf;

/* Command list for "record btrace".  */
static struct cmd_list_element *record_btrace_cmdlist;

/* Command lists for "set/show record btrace bts".  */
static struct cmd_list_element *set_record_btrace_bts_cmdlist;
static struct cmd_list_element *show_record_btrace_bts_cmdlist;

/* Command lists for "set/show record btrace pt".  */
static struct cmd_list_element *set_record_btrace_pt_cmdlist;
static struct cmd_list_element *show_record_btrace_pt_cmdlist;

/* Print a record-btrace debug message when "record debug" is non-zero.
   Use do ... while (0) to avoid ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
99
100 /* Update the branch trace for the current thread and return a pointer to its
101 thread_info.
102
103 Throws an error if there is no thread or no trace. This function never
104 returns NULL. */
105
106 static struct thread_info *
107 require_btrace_thread (void)
108 {
109 struct thread_info *tp;
110
111 DEBUG ("require");
112
113 tp = find_thread_ptid (inferior_ptid);
114 if (tp == NULL)
115 error (_("No thread."));
116
117 btrace_fetch (tp);
118
119 if (btrace_is_empty (tp))
120 error (_("No trace."));
121
122 return tp;
123 }
124
125 /* Update the branch trace for the current thread and return a pointer to its
126 branch trace information struct.
127
128 Throws an error if there is no thread or no trace. This function never
129 returns NULL. */
130
131 static struct btrace_thread_info *
132 require_btrace (void)
133 {
134 struct thread_info *tp;
135
136 tp = require_btrace_thread ();
137
138 return &tp->btrace;
139 }
140
141 /* Enable branch tracing for one thread. Warn on errors. */
142
/* Enable branch tracing for one thread.  Warn on errors instead of
   throwing, so that callers (e.g. the new-thread observer) keep going
   for the remaining threads.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  TRY
    {
      btrace_enable (tp, &record_btrace_conf);
    }
  CATCH (error, RETURN_MASK_ERROR)
    {
      /* Demote the error to a warning; tracing simply stays disabled
	 for this thread.  */
      warning ("%s", error.message);
    }
  END_CATCH
}
156
157 /* Callback function to disable branch tracing for one thread. */
158
/* Cleanup callback to disable branch tracing for one thread.  ARG is the
   thread_info of the thread to disable.  */

static void
record_btrace_disable_callback (void *arg)
{
  btrace_disable ((struct thread_info *) arg);
}
168
169 /* Enable automatic tracing of new threads. */
170
171 static void
172 record_btrace_auto_enable (void)
173 {
174 DEBUG ("attach thread observer");
175
176 record_btrace_thread_observer
177 = observer_attach_new_thread (record_btrace_enable_warn);
178 }
179
180 /* Disable automatic tracing of new threads. */
181
182 static void
183 record_btrace_auto_disable (void)
184 {
185 /* The observer may have been detached, already. */
186 if (record_btrace_thread_observer == NULL)
187 return;
188
189 DEBUG ("detach thread observer");
190
191 observer_detach_new_thread (record_btrace_thread_observer);
192 record_btrace_thread_observer = NULL;
193 }
194
195 /* The record-btrace async event handler function. */
196
/* The record-btrace async event handler function.  DATA is unused; we
   simply dispatch a register event to the inferior event handler.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
202
203 /* The to_open method of target record-btrace. */
204
/* The to_open method of target record-btrace.

   ARGS optionally contains a thread number list; when given, only those
   threads are traced.  Errors out when the inferior is not running or
   when we are in non-stop mode.  */

static void
record_btrace_open (const char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Enable tracing per thread, registering a cleanup for each enabled
     thread so tracing is turned off again should a later enable throw.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp, &record_btrace_conf);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  /* Also trace any thread created from here on.  */
  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (), 1);

  /* Everything succeeded; keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
245
246 /* The to_stop_recording method of target record-btrace. */
247
248 static void
249 record_btrace_stop_recording (struct target_ops *self)
250 {
251 struct thread_info *tp;
252
253 DEBUG ("stop recording");
254
255 record_btrace_auto_disable ();
256
257 ALL_NON_EXITED_THREADS (tp)
258 if (tp->btrace.target != NULL)
259 btrace_disable (tp);
260 }
261
262 /* The to_close method of target record-btrace. */
263
264 static void
265 record_btrace_close (struct target_ops *self)
266 {
267 struct thread_info *tp;
268
269 if (record_btrace_async_inferior_event_handler != NULL)
270 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
271
272 /* Make sure automatic recording gets disabled even if we did not stop
273 recording before closing the record-btrace target. */
274 record_btrace_auto_disable ();
275
276 /* We should have already stopped recording.
277 Tear down btrace in case we have not. */
278 ALL_NON_EXITED_THREADS (tp)
279 btrace_teardown (tp);
280 }
281
282 /* The to_async method of target record-btrace. */
283
284 static void
285 record_btrace_async (struct target_ops *ops, int enable)
286 {
287 if (enable)
288 mark_async_event_handler (record_btrace_async_inferior_event_handler);
289 else
290 clear_async_event_handler (record_btrace_async_inferior_event_handler);
291
292 ops->beneath->to_async (ops->beneath, enable);
293 }
294
295 /* Adjusts the size and returns a human readable size suffix. */
296
/* Adjusts the size and returns a human readable size suffix.

   If *SIZE is an exact multiple of a binary unit (GiB, MiB, KiB, in that
   order), scale *SIZE down to that unit and return the matching suffix.
   Otherwise leave *SIZE unchanged and return the empty string.  */

static const char *
record_btrace_adjust_size (unsigned int *size)
{
  static const struct
  {
    unsigned int shift;
    const char *suffix;
  } units[] =
  {
    { 30, "GB" },
    { 20, "MB" },
    { 10, "kB" }
  };
  unsigned int i;

  for (i = 0; i < sizeof (units) / sizeof (units[0]); ++i)
    {
      unsigned int mask = (1u << units[i].shift) - 1;

      if ((*size & mask) == 0)
	{
	  *size >>= units[i].shift;
	  return units[i].suffix;
	}
    }

  return "";
}
322
323 /* Print a BTS configuration. */
324
325 static void
326 record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
327 {
328 const char *suffix;
329 unsigned int size;
330
331 size = conf->size;
332 if (size > 0)
333 {
334 suffix = record_btrace_adjust_size (&size);
335 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
336 }
337 }
338
339 /* Print an Intel(R) Processor Trace configuration. */
340
341 static void
342 record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
343 {
344 const char *suffix;
345 unsigned int size;
346
347 size = conf->size;
348 if (size > 0)
349 {
350 suffix = record_btrace_adjust_size (&size);
351 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
352 }
353 }
354
355 /* Print a branch tracing configuration. */
356
357 static void
358 record_btrace_print_conf (const struct btrace_config *conf)
359 {
360 printf_unfiltered (_("Recording format: %s.\n"),
361 btrace_format_string (conf->format));
362
363 switch (conf->format)
364 {
365 case BTRACE_FORMAT_NONE:
366 return;
367
368 case BTRACE_FORMAT_BTS:
369 record_btrace_print_bts_conf (&conf->bts);
370 return;
371
372 case BTRACE_FORMAT_PT:
373 record_btrace_print_pt_conf (&conf->pt);
374 return;
375 }
376
377 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
378 }
379
380 /* The to_info_record method of target record-btrace. */
381
/* The to_info_record method of target record-btrace.

   Prints the recording configuration, the number of recorded
   instructions, functions and gaps for the current thread, and the
   current replay position when replaying.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  const struct btrace_config *conf;
  struct thread_info *tp;
  unsigned int insns, calls, gaps;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btinfo = &tp->btrace;

  conf = btrace_conf (btinfo);
  if (conf != NULL)
    record_btrace_print_conf (conf);

  btrace_fetch (tp);

  insns = 0;
  calls = 0;
  gaps = 0;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call gives the total call count.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);

      insns = btrace_insn_number (&insn);
      if (insns != 0)
	{
	  /* The last instruction does not really belong to the trace.  */
	  insns -= 1;
	}
      else
	{
	  unsigned int steps;

	  /* Skip gaps at the end: walk backwards until we reach an
	     iterator position with a non-zero instruction number or run
	     out of trace.  */
	  do
	    {
	      steps = btrace_insn_prev (&insn, 1);
	      if (steps == 0)
		break;

	      insns = btrace_insn_number (&insn);
	    }
	  while (insns == 0);
	}

      gaps = btinfo->ngaps;
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
		       "for thread %d (%s).\n"), insns, calls, gaps,
		     tp->num, target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
452
453 /* Print a decode error. */
454
/* Print a decode error for ERRCODE in the given trace FORMAT on UIOUT.

   Some Intel PT "errors" (user quit, disabled, overflow) are printed as
   plain informational markers rather than decode errors.  */

static void
btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
			    enum btrace_format format)
{
  const char *errstr;
  int is_error;

  errstr = _("unknown");
  is_error = 1;

  switch (format)
    {
    default:
      break;

    case BTRACE_FORMAT_BTS:
      switch (errcode)
	{
	default:
	  break;

	case BDE_BTS_OVERFLOW:
	  errstr = _("instruction overflow");
	  break;

	case BDE_BTS_INSN_SIZE:
	  errstr = _("unknown instruction");
	  break;
	}
      break;

#if defined (HAVE_LIBIPT)
    case BTRACE_FORMAT_PT:
      switch (errcode)
	{
	case BDE_PT_USER_QUIT:
	  is_error = 0;
	  errstr = _("trace decode cancelled");
	  break;

	case BDE_PT_DISABLED:
	  is_error = 0;
	  errstr = _("disabled");
	  break;

	case BDE_PT_OVERFLOW:
	  is_error = 0;
	  errstr = _("overflow");
	  break;

	default:
	  /* Negative error codes come from libipt itself; translate them
	     via its error-string facility.  */
	  if (errcode < 0)
	    errstr = pt_errstr (pt_errcode (errcode));
	  break;
	}
      break;
#endif /* defined (HAVE_LIBIPT) */
    }

  ui_out_text (uiout, _("["));
  if (is_error)
    {
      ui_out_text (uiout, _("decode error ("));
      ui_out_field_int (uiout, "errcode", errcode);
      ui_out_text (uiout, _("): "));
    }
  ui_out_text (uiout, errstr);
  ui_out_text (uiout, _("]\n"));
}
524
525 /* Print an unsigned int. */
526
/* Print VAL as an unsigned decimal field named FLD on UIOUT.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
532
533 /* Disassemble a section of the recorded instruction trace. */
534
/* Disassemble a section of the recorded instruction trace.

   Prints the half-open iterator range [BEGIN; END) of BTINFO's trace on
   UIOUT.  FLAGS are DISASSEMBLY_* flags; gaps in the trace are rendered
   as decode-error markers instead of instructions.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* A NULL instruction indicates a gap in the trace.  */
      if (insn == NULL)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, it.function->errcode,
				      conf->format);
	}
      else
	{
	  char prefix[4];

	  /* We may add a speculation prefix later.  We use the same space
	     that is used for the pc prefix.  */
	  if ((flags & DISASSEMBLY_OMIT_PC) == 0)
	    strncpy (prefix, pc_prefix (insn->pc), 3);
	  else
	    {
	      prefix[0] = ' ';
	      prefix[1] = ' ';
	      prefix[2] = ' ';
	    }
	  /* Terminate explicitly; strncpy above does not guarantee it.  */
	  prefix[3] = 0;

	  /* Print the instruction index.  */
	  ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
	  ui_out_text (uiout, "\t");

	  /* Indicate speculative execution by a leading '?'.  */
	  if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
	    prefix[0] = '?';

	  /* Print the prefix; we tell gdb_disassembly below to omit it.  */
	  ui_out_field_fmt (uiout, "prefix", "%s", prefix);

	  /* Disassembly with '/m' flag may not produce the expected result.
	     See PR gdb/11833.  */
	  gdb_disassembly (gdbarch, uiout, NULL, flags | DISASSEMBLY_OMIT_PC,
			   1, insn->pc, insn->pc + 1);
	}
    }
}
602
603 /* The to_insn_history method of target record-btrace. */
604
/* The to_insn_history method of target record-btrace.

   Prints abs (SIZE) instructions around the current history position;
   a negative SIZE moves backwards, a positive SIZE forwards.  Repeated
   invocations continue from the previously printed range.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from where the previous invocation stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next invocation.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
688
689 /* The to_insn_history_range method of target record-btrace. */
690
/* The to_insn_history_range method of target record-btrace.

   Prints the instructions numbered FROM to TO, both inclusive.  Errors
   out when the range wraps an unsigned int, is inverted, or starts past
   the end of the trace; a too-large TO is silently truncated.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int; the
     truncating assignments above lose information if FROM/TO exceed
     that range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
741
742 /* The to_insn_history_from method of target record-btrace. */
743
744 static void
745 record_btrace_insn_history_from (struct target_ops *self,
746 ULONGEST from, int size, int flags)
747 {
748 ULONGEST begin, end, context;
749
750 context = abs (size);
751 if (context == 0)
752 error (_("Bad record instruction-history-size."));
753
754 if (size < 0)
755 {
756 end = from;
757
758 if (from < context)
759 begin = 0;
760 else
761 begin = from - context + 1;
762 }
763 else
764 {
765 begin = from;
766 end = from + context - 1;
767
768 /* Check for wrap-around. */
769 if (end < begin)
770 end = ULONGEST_MAX;
771 }
772
773 record_btrace_insn_history_range (self, begin, end, flags);
774 }
775
776 /* Print the instruction number range for a function call history line. */
777
/* Print the inclusive instruction number range covered by BFUN for a
   function call history line on UIOUT.  BFUN must contain at least one
   instruction.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
794
795 /* Compute the lowest and highest source line for the instructions in BFUN
796 and return them in PBEGIN and PEND.
797 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
798 result from inlining or macro expansion. */
799
/* Compute the lowest and highest source line for the instructions in BFUN
   and return them in PBEGIN and PEND.
   Ignore instructions that can't be mapped to BFUN, e.g. instructions that
   result from inlining or macro expansion.

   When BFUN has no symbol or no instruction maps, *PBEGIN ends up greater
   than *PEND (INT_MAX/INT_MIN), which callers treat as "no range".  */

static void
btrace_compute_src_line_range (const struct btrace_function *bfun,
			       int *pbegin, int *pend)
{
  struct btrace_insn *insn;
  struct symtab *symtab;
  struct symbol *sym;
  unsigned int idx;
  int begin, end;

  begin = INT_MAX;
  end = INT_MIN;

  sym = bfun->sym;
  if (sym == NULL)
    goto out;

  symtab = symbol_symtab (sym);

  for (idx = 0; VEC_iterate (btrace_insn_s, bfun->insn, idx, insn); ++idx)
    {
      struct symtab_and_line sal;

      sal = find_pc_line (insn->pc, 0);
      /* Skip lines from other symtabs (inlining/macro expansion) and
	 instructions without line info.  */
      if (sal.symtab != symtab || sal.line == 0)
	continue;

      begin = min (begin, sal.line);
      end = max (end, sal.line);
    }

 out:
  *pbegin = begin;
  *pend = end;
}
835
836 /* Print the source line information for a function call history line. */
837
838 static void
839 btrace_call_history_src_line (struct ui_out *uiout,
840 const struct btrace_function *bfun)
841 {
842 struct symbol *sym;
843 int begin, end;
844
845 sym = bfun->sym;
846 if (sym == NULL)
847 return;
848
849 ui_out_field_string (uiout, "file",
850 symtab_to_filename_for_display (symbol_symtab (sym)));
851
852 btrace_compute_src_line_range (bfun, &begin, &end);
853 if (end < begin)
854 return;
855
856 ui_out_text (uiout, ":");
857 ui_out_field_int (uiout, "min line", begin);
858
859 if (end == begin)
860 return;
861
862 ui_out_text (uiout, ",");
863 ui_out_field_int (uiout, "max line", end);
864 }
865
866 /* Get the name of a branch trace function. */
867
868 static const char *
869 btrace_get_bfun_name (const struct btrace_function *bfun)
870 {
871 struct minimal_symbol *msym;
872 struct symbol *sym;
873
874 if (bfun == NULL)
875 return "??";
876
877 msym = bfun->msym;
878 sym = bfun->sym;
879
880 if (sym != NULL)
881 return SYMBOL_PRINT_NAME (sym);
882 else if (msym != NULL)
883 return MSYMBOL_PRINT_NAME (msym);
884 else
885 return "??";
886 }
887
888 /* Disassemble a section of the recorded function trace. */
889
/* Print a section of the recorded function trace.

   Prints the half-open iterator range [BEGIN; END) of BTINFO on UIOUT,
   one line per function segment: index, optional call-depth indent,
   name, instruction range and source line range according to FLAGS.
   Gaps are rendered as decode-error markers.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      /* Indicate gaps in the trace.  */
      if (bfun->errcode != 0)
	{
	  const struct btrace_config *conf;

	  conf = btrace_conf (btinfo);

	  /* We have trace so we must have a configuration.  */
	  gdb_assert (conf != NULL);

	  btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);

	  continue;
	}

      /* Indent by call depth when requested.  */
      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
961
962 /* The to_call_history method of target record-btrace. */
963
/* The to_call_history method of target record-btrace.

   Prints abs (SIZE) function segments around the current history
   position; a negative SIZE moves backwards, a positive SIZE forwards.
   Repeated invocations continue from the previously printed range.

   NOTE(review): the ui-out tuple is named "insn history" here while the
   range variant uses "func history" — looks inconsistent, but MI
   consumers may rely on the current id; confirm before changing.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue from where the previous invocation stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the printed range for the next invocation.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
1050
1051 /* The to_call_history_range method of target record-btrace. */
1052
/* The to_call_history_range method of target record-btrace.

   Prints the function segments numbered FROM to TO, both inclusive.
   Errors out when the range wraps an unsigned int, is inverted, or
   starts past the end of the trace; a too-large TO is silently
   truncated.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int; the
     truncating assignments above lose information if FROM/TO exceed
     that range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
1103
1104 /* The to_call_history_from method of target record-btrace. */
1105
1106 static void
1107 record_btrace_call_history_from (struct target_ops *self,
1108 ULONGEST from, int size, int flags)
1109 {
1110 ULONGEST begin, end, context;
1111
1112 context = abs (size);
1113 if (context == 0)
1114 error (_("Bad record function-call-history-size."));
1115
1116 if (size < 0)
1117 {
1118 end = from;
1119
1120 if (from < context)
1121 begin = 0;
1122 else
1123 begin = from - context + 1;
1124 }
1125 else
1126 {
1127 begin = from;
1128 end = from + context - 1;
1129
1130 /* Check for wrap-around. */
1131 if (end < begin)
1132 end = ULONGEST_MAX;
1133 }
1134
1135 record_btrace_call_history_range (self, begin, end, flags);
1136 }
1137
1138 /* The to_record_is_replaying method of target record-btrace. */
1139
1140 static int
1141 record_btrace_is_replaying (struct target_ops *self)
1142 {
1143 struct thread_info *tp;
1144
1145 ALL_NON_EXITED_THREADS (tp)
1146 if (btrace_is_replaying (tp))
1147 return 1;
1148
1149 return 0;
1150 }
1151
1152 /* The to_xfer_partial method of target record-btrace. */
1153
1154 static enum target_xfer_status
1155 record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1156 const char *annex, gdb_byte *readbuf,
1157 const gdb_byte *writebuf, ULONGEST offset,
1158 ULONGEST len, ULONGEST *xfered_len)
1159 {
1160 struct target_ops *t;
1161
1162 /* Filter out requests that don't make sense during replay. */
1163 if (replay_memory_access == replay_memory_access_read_only
1164 && !record_btrace_generating_corefile
1165 && record_btrace_is_replaying (ops))
1166 {
1167 switch (object)
1168 {
1169 case TARGET_OBJECT_MEMORY:
1170 {
1171 struct target_section *section;
1172
1173 /* We do not allow writing memory in general. */
1174 if (writebuf != NULL)
1175 {
1176 *xfered_len = len;
1177 return TARGET_XFER_UNAVAILABLE;
1178 }
1179
1180 /* We allow reading readonly memory. */
1181 section = target_section_by_addr (ops, offset);
1182 if (section != NULL)
1183 {
1184 /* Check if the section we found is readonly. */
1185 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1186 section->the_bfd_section)
1187 & SEC_READONLY) != 0)
1188 {
1189 /* Truncate the request to fit into this section. */
1190 len = min (len, section->endaddr - offset);
1191 break;
1192 }
1193 }
1194
1195 *xfered_len = len;
1196 return TARGET_XFER_UNAVAILABLE;
1197 }
1198 }
1199 }
1200
1201 /* Forward the request. */
1202 ops = ops->beneath;
1203 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1204 offset, len, xfered_len);
1205 }
1206
1207 /* The to_insert_breakpoint method of target record-btrace. */
1208
/* The to_insert_breakpoint method of target record-btrace.

   Temporarily lifts the replay memory-access restriction so the target
   beneath can patch the breakpoint in, restoring it afterwards even when
   the insertion throws.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1237
/* The to_remove_breakpoint method of target record-btrace.

   Mirrors record_btrace_insert_breakpoint: memory must be writable while
   the breakpoint shadow is restored, so temporarily allow read-write
   replay memory access.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY
    {
      ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the access mode before re-throwing; there is no cleanup
	 running after throw_exception.  */
      replay_memory_access = old;
      throw_exception (except);
    }
  END_CATCH
  replay_memory_access = old;

  return ret;
}
1268
1269 /* The to_fetch_registers method of target record-btrace. */
1270
1271 static void
1272 record_btrace_fetch_registers (struct target_ops *ops,
1273 struct regcache *regcache, int regno)
1274 {
1275 struct btrace_insn_iterator *replay;
1276 struct thread_info *tp;
1277
1278 tp = find_thread_ptid (inferior_ptid);
1279 gdb_assert (tp != NULL);
1280
1281 replay = tp->btrace.replay;
1282 if (replay != NULL && !record_btrace_generating_corefile)
1283 {
1284 const struct btrace_insn *insn;
1285 struct gdbarch *gdbarch;
1286 int pcreg;
1287
1288 gdbarch = get_regcache_arch (regcache);
1289 pcreg = gdbarch_pc_regnum (gdbarch);
1290 if (pcreg < 0)
1291 return;
1292
1293 /* We can only provide the PC register. */
1294 if (regno >= 0 && regno != pcreg)
1295 return;
1296
1297 insn = btrace_insn_get (replay);
1298 gdb_assert (insn != NULL);
1299
1300 regcache_raw_supply (regcache, regno, &insn->pc);
1301 }
1302 else
1303 {
1304 struct target_ops *t = ops->beneath;
1305
1306 t->to_fetch_registers (t, regcache, regno);
1307 }
1308 }
1309
1310 /* The to_store_registers method of target record-btrace. */
1311
1312 static void
1313 record_btrace_store_registers (struct target_ops *ops,
1314 struct regcache *regcache, int regno)
1315 {
1316 struct target_ops *t;
1317
1318 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1319 error (_("This record target does not allow writing registers."));
1320
1321 gdb_assert (may_write_registers != 0);
1322
1323 t = ops->beneath;
1324 t->to_store_registers (t, regcache, regno);
1325 }
1326
1327 /* The to_prepare_to_store method of target record-btrace. */
1328
1329 static void
1330 record_btrace_prepare_to_store (struct target_ops *ops,
1331 struct regcache *regcache)
1332 {
1333 struct target_ops *t;
1334
1335 if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
1336 return;
1337
1338 t = ops->beneath;
1339 t->to_prepare_to_store (t, regcache);
1340 }
1341
/* The branch trace frame cache.

   Associates a frame with the branch trace function segment it was
   created from, so the unwinder callbacks can recover that segment.  */

struct btrace_frame_cache
{
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info.  Serves as the hash key for this cache entry
     (see bfcache_hash and bfcache_eq).  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};
1355
/* A struct btrace_frame_cache hash table keyed by each entry's frame
   pointer (see bfcache_hash/bfcache_eq).  */

static htab_t bfcache;
1359
1360 /* hash_f for htab_create_alloc of bfcache. */
1361
1362 static hashval_t
1363 bfcache_hash (const void *arg)
1364 {
1365 const struct btrace_frame_cache *cache = arg;
1366
1367 return htab_hash_pointer (cache->frame);
1368 }
1369
1370 /* eq_f for htab_create_alloc of bfcache. */
1371
1372 static int
1373 bfcache_eq (const void *arg1, const void *arg2)
1374 {
1375 const struct btrace_frame_cache *cache1 = arg1;
1376 const struct btrace_frame_cache *cache2 = arg2;
1377
1378 return cache1->frame == cache2->frame;
1379 }
1380
/* Create a new btrace frame cache for FRAME and register it in BFCACHE.
   The cache is allocated on the frame obstack, so its storage lives as
   long as the frame does.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  /* FRAME must not already have an entry in BFCACHE.  */
  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1398
1399 /* Extract the branch trace function from a branch trace frame. */
1400
1401 static const struct btrace_function *
1402 btrace_get_frame_function (struct frame_info *frame)
1403 {
1404 const struct btrace_frame_cache *cache;
1405 const struct btrace_function *bfun;
1406 struct btrace_frame_cache pattern;
1407 void **slot;
1408
1409 pattern.frame = frame;
1410
1411 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1412 if (slot == NULL)
1413 return NULL;
1414
1415 cache = *slot;
1416 return cache->bfun;
1417 }
1418
1419 /* Implement stop_reason method for record_btrace_frame_unwind. */
1420
1421 static enum unwind_stop_reason
1422 record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1423 void **this_cache)
1424 {
1425 const struct btrace_frame_cache *cache;
1426 const struct btrace_function *bfun;
1427
1428 cache = *this_cache;
1429 bfun = cache->bfun;
1430 gdb_assert (bfun != NULL);
1431
1432 if (bfun->up == NULL)
1433 return UNWIND_UNAVAILABLE;
1434
1435 return UNWIND_NO_REASON;
1436 }
1437
/* Implement this_id method for record_btrace_frame_unwind.

   The stack is not available during replay, so the frame id is built as
   "unavailable stack" from the frame's function address plus the number
   of the first segment of this function as the special address.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function so all segments of
     the same function instance produce the same id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  special = bfun->number;

  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1466
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound; it is reconstructed from the caller's
   function segment in the branch trace.  Throws NOT_AVAILABLE_ERROR for
   any other register or when no caller is recorded.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link is the return target: the caller's PC is the first
	 instruction of the caller segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link is the call site: the caller's PC is the instruction
	 following the caller segment's last instruction.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1515
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims a frame when a branch trace function segment can be determined
   for it: the current replay position for the innermost frame, or the
   caller of the (cached) callee segment otherwise.  Tail calls are left
   to record_btrace_tailcall_frame_sniffer.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: unwind via the callee's up link, unless it marks a
	 tail call (handled by the tailcall sniffer instead).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1565
1566 /* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1567
1568 static int
1569 record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1570 struct frame_info *this_frame,
1571 void **this_cache)
1572 {
1573 const struct btrace_function *bfun, *callee;
1574 struct btrace_frame_cache *cache;
1575 struct frame_info *next;
1576
1577 next = get_next_frame (this_frame);
1578 if (next == NULL)
1579 return 0;
1580
1581 callee = btrace_get_frame_function (next);
1582 if (callee == NULL)
1583 return 0;
1584
1585 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1586 return 0;
1587
1588 bfun = callee->up;
1589 if (bfun == NULL)
1590 return 0;
1591
1592 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1593 btrace_get_bfun_name (bfun), bfun->level);
1594
1595 /* This is our frame. Initialize the frame cache. */
1596 cache = bfcache_new (this_frame);
1597 cache->tp = find_thread_ptid (inferior_ptid);
1598 cache->bfun = bfun;
1599
1600 *this_cache = cache;
1601 return 1;
1602 }
1603
/* Implement dealloc_cache method for record_btrace_frame_unwind and
   record_btrace_tailcall_frame_unwind: drop the frame's entry from
   BFCACHE.  The cache memory itself is on the frame obstack (see
   bfcache_new) and is not freed here.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry must have been registered by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1617
/* btrace recording does not store previous memory content, nor the stack
   frames content.  Any unwinding would return erroneous results as the
   stack contents no longer match the changed PC value restored from
   history.  Therefore this unwinder reports any possibly unwound
   registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1634
/* The btrace unwinder for tail call frames.  Identical to
   record_btrace_frame_unwind except for the frame type and the sniffer,
   which only claims frames whose callee segment marks a tail call.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1645
1646 /* Implement the to_get_unwinder method. */
1647
1648 static const struct frame_unwind *
1649 record_btrace_to_get_unwinder (struct target_ops *self)
1650 {
1651 return &record_btrace_frame_unwind;
1652 }
1653
1654 /* Implement the to_get_tailcall_unwinder method. */
1655
1656 static const struct frame_unwind *
1657 record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1658 {
1659 return &record_btrace_tailcall_frame_unwind;
1660 }
1661
/* Indicate that TP should be resumed according to FLAG.

   This only records the intent in TP's btrace flags; the actual move
   happens later (see record_btrace_wait).  Errors if TP already has a
   pending move.  */

static void
record_btrace_resume_thread (struct thread_info *tp,
			     enum btrace_thread_flag flag)
{
  struct btrace_thread_info *btinfo;

  DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);

  btinfo = &tp->btrace;

  if ((btinfo->flags & BTHR_MOVE) != 0)
    error (_("Thread already moving."));

  /* Fetch the latest branch trace.  */
  btrace_fetch (tp);

  btinfo->flags |= flag;
}
1682
1683 /* Find the thread to resume given a PTID. */
1684
1685 static struct thread_info *
1686 record_btrace_find_resume_thread (ptid_t ptid)
1687 {
1688 struct thread_info *tp;
1689
1690 /* When asked to resume everything, we pick the current thread. */
1691 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1692 ptid = inferior_ptid;
1693
1694 return find_thread_ptid (ptid);
1695 }
1696
/* Start replaying a thread.

   Positions a new replay iterator at the end of TP's branch trace
   (skipping trailing gaps), stores it in TP's btrace info, and fixes up
   any stepping-related frame ids so step detection keeps working across
   the switch to the btrace unwinder.  Returns the iterator, or NULL if
   there is no trace.  On error, the previous state is restored and the
   exception re-thrown.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = XNEW (struct btrace_insn_iterator);
      btrace_insn_end (replay, btinfo);

      /* Skip gaps at the end of the trace.  */
      while (btrace_insn_get (replay) == NULL)
	{
	  unsigned int steps;

	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    error (_("No trace."));
	}

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }
  CATCH (except, RETURN_MASK_ALL)
    {
      /* Restore the previous execution state.  */
      set_executing (tp->ptid, executing);

      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }
  END_CATCH

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  return replay;
}
1795
1796 /* Stop replaying a thread. */
1797
1798 static void
1799 record_btrace_stop_replaying (struct thread_info *tp)
1800 {
1801 struct btrace_thread_info *btinfo;
1802
1803 btinfo = &tp->btrace;
1804
1805 xfree (btinfo->replay);
1806 btinfo->replay = NULL;
1807
1808 /* Make sure we're not leaving any stale registers. */
1809 registers_changed_ptid (tp->ptid);
1810 }
1811
1812 /* The to_resume method of target record-btrace. */
1813
1814 static void
1815 record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1816 enum gdb_signal signal)
1817 {
1818 struct thread_info *tp, *other;
1819 enum btrace_thread_flag flag;
1820
1821 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1822
1823 /* Store the execution direction of the last resume. */
1824 record_btrace_resume_exec_dir = execution_direction;
1825
1826 tp = record_btrace_find_resume_thread (ptid);
1827 if (tp == NULL)
1828 error (_("Cannot find thread to resume."));
1829
1830 /* Stop replaying other threads if the thread to resume is not replaying. */
1831 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1832 ALL_NON_EXITED_THREADS (other)
1833 record_btrace_stop_replaying (other);
1834
1835 /* As long as we're not replaying, just forward the request. */
1836 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
1837 {
1838 ops = ops->beneath;
1839 return ops->to_resume (ops, ptid, step, signal);
1840 }
1841
1842 /* Compute the btrace thread flag for the requested move. */
1843 if (step == 0)
1844 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1845 else
1846 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1847
1848 /* At the moment, we only move a single thread. We could also move
1849 all threads in parallel by single-stepping each resumed thread
1850 until the first runs into an event.
1851 When we do that, we would want to continue all other threads.
1852 For now, just resume one thread to not confuse to_wait. */
1853 record_btrace_resume_thread (tp, flag);
1854
1855 /* We just indicate the resume intent here. The actual stepping happens in
1856 record_btrace_wait below. */
1857
1858 /* Async support. */
1859 if (target_can_async_p ())
1860 {
1861 target_async (1);
1862 mark_async_event_handler (record_btrace_async_inferior_event_handler);
1863 }
1864 }
1865
1866 /* Find a thread to move. */
1867
1868 static struct thread_info *
1869 record_btrace_find_thread_to_move (ptid_t ptid)
1870 {
1871 struct thread_info *tp;
1872
1873 /* First check the parameter thread. */
1874 tp = find_thread_ptid (ptid);
1875 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1876 return tp;
1877
1878 /* Otherwise, find one other thread that has been resumed. */
1879 ALL_NON_EXITED_THREADS (tp)
1880 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1881 return tp;
1882
1883 return NULL;
1884 }
1885
1886 /* Return a target_waitstatus indicating that we ran out of history. */
1887
1888 static struct target_waitstatus
1889 btrace_step_no_history (void)
1890 {
1891 struct target_waitstatus status;
1892
1893 status.kind = TARGET_WAITKIND_NO_HISTORY;
1894
1895 return status;
1896 }
1897
1898 /* Return a target_waitstatus indicating that a step finished. */
1899
1900 static struct target_waitstatus
1901 btrace_step_stopped (void)
1902 {
1903 struct target_waitstatus status;
1904
1905 status.kind = TARGET_WAITKIND_STOPPED;
1906 status.value.sig = GDB_SIGNAL_TRAP;
1907
1908 return status;
1909 }
1910
1911 /* Clear the record histories. */
1912
1913 static void
1914 record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1915 {
1916 xfree (btinfo->insn_history);
1917 xfree (btinfo->call_history);
1918
1919 btinfo->insn_history = NULL;
1920 btinfo->call_history = NULL;
1921 }
1922
/* Step a single thread.

   Consumes TP's pending BTHR_MOVE flag and performs the requested move
   over the branch trace: single step (forward/reverse) or continue
   (forward/reverse).  Returns the resulting wait status: STOPPED with
   SIGTRAP when a step completed or a breakpoint was hit, NO_HISTORY when
   the end (or beginning) of the recorded history was reached.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history.  */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_next (replay, 1);
	  if (steps == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }
	}
      while (btrace_insn_get (replay) == NULL);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.
	 Skip gaps during replay.  */
      do
	{
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	}
      while (btrace_insn_get (replay) == NULL);

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward one instruction at a time until we hit a breakpoint
	 or run out of trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_next (replay, 1);
	      if (steps == 0)
		{
		  record_btrace_stop_replaying (tp);
		  return btrace_step_no_history ();
		}

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
						  &btinfo->stop_reason))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_ptid (tp->ptid);
      aspace = inf->aspace;

      /* Step backwards one instruction at a time until we hit a breakpoint
	 or run out of history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we reached the end of the history.
	     Skip gaps during replay.  */
	  do
	    {
	      steps = btrace_insn_prev (replay, 1);
	      if (steps == 0)
		return btrace_step_no_history ();

	      insn = btrace_insn_get (replay);
	    }
	  while (insn == NULL);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (record_check_stopped_by_breakpoint (aspace, insn->pc,
						  &btinfo->stop_reason))
	    return btrace_step_stopped ();
	}
    }
}
2075
/* The to_wait method of target record-btrace.

   While not replaying, the request is forwarded to the target beneath.
   Otherwise a single thread with a pending move is stepped via
   record_btrace_step_thread and its wait status reported.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads.  */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
2119
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Reverse execution is always supported by this target.  */
  return 1;
}
2127
2128 /* The to_stopped_by_sw_breakpoint method of target record-btrace. */
2129
2130 static int
2131 record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
2132 {
2133 if (record_btrace_is_replaying (ops))
2134 {
2135 struct thread_info *tp = inferior_thread ();
2136
2137 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2138 }
2139
2140 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2141 }
2142
2143 /* The to_supports_stopped_by_sw_breakpoint method of target
2144 record-btrace. */
2145
2146 static int
2147 record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2148 {
2149 if (record_btrace_is_replaying (ops))
2150 return 1;
2151
2152 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2153 }
2154
/* The to_stopped_by_hw_breakpoint method of target record-btrace.  */

static int
record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
{
  if (record_btrace_is_replaying (ops))
    {
      struct thread_info *tp = inferior_thread ();

      /* Report the stop reason recorded during the last replay move.  */
      return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
    }

  return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
}
2169
2170 /* The to_supports_stopped_by_hw_breakpoint method of target
2171 record-btrace. */
2172
2173 static int
2174 record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2175 {
2176 if (record_btrace_is_replaying (ops))
2177 return 1;
2178
2179 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
2180 }
2181
2182 /* The to_update_thread_list method of target record-btrace. */
2183
2184 static void
2185 record_btrace_update_thread_list (struct target_ops *ops)
2186 {
2187 /* We don't add or remove threads during replay. */
2188 if (record_btrace_is_replaying (ops))
2189 return;
2190
2191 /* Forward the request. */
2192 ops = ops->beneath;
2193 ops->to_update_thread_list (ops);
2194 }
2195
2196 /* The to_thread_alive method of target record-btrace. */
2197
2198 static int
2199 record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2200 {
2201 /* We don't add or remove threads during replay. */
2202 if (record_btrace_is_replaying (ops))
2203 return find_thread_ptid (ptid) != NULL;
2204
2205 /* Forward the request. */
2206 ops = ops->beneath;
2207 return ops->to_thread_alive (ops, ptid);
2208 }
2209
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  Clears the record histories and reprints the current frame
   at the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	return;

      /* NOTE(review): if record_btrace_start_replaying returned NULL
	 (no trace), BTINFO->REPLAY would still be NULL here and this
	 would dereference NULL -- confirm callers guarantee a trace
	 exists at this point.  */
      *btinfo->replay = *it;
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);

  stop_pc = regcache_read_pc (get_current_regcache ());
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
2240
2241 /* The to_goto_record_begin method of target record-btrace. */
2242
2243 static void
2244 record_btrace_goto_begin (struct target_ops *self)
2245 {
2246 struct thread_info *tp;
2247 struct btrace_insn_iterator begin;
2248
2249 tp = require_btrace_thread ();
2250
2251 btrace_insn_begin (&begin, &tp->btrace);
2252 record_btrace_set_replay (tp, &begin);
2253 }
2254
2255 /* The to_goto_record_end method of target record-btrace. */
2256
2257 static void
2258 record_btrace_goto_end (struct target_ops *ops)
2259 {
2260 struct thread_info *tp;
2261
2262 tp = require_btrace_thread ();
2263
2264 record_btrace_set_replay (tp, NULL);
2265 }
2266
2267 /* The to_goto_record method of target record-btrace. */
2268
2269 static void
2270 record_btrace_goto (struct target_ops *self, ULONGEST insn)
2271 {
2272 struct thread_info *tp;
2273 struct btrace_insn_iterator it;
2274 unsigned int number;
2275 int found;
2276
2277 number = insn;
2278
2279 /* Check for wrap-arounds. */
2280 if (number != insn)
2281 error (_("Instruction number out of range."));
2282
2283 tp = require_btrace_thread ();
2284
2285 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
2286 if (found == 0)
2287 error (_("No such instruction."));
2288
2289 record_btrace_set_replay (tp, &it);
2290 }
2291
2292 /* The to_execution_direction target method. */
2293
2294 static enum exec_direction_kind
2295 record_btrace_execution_direction (struct target_ops *self)
2296 {
2297 return record_btrace_resume_exec_dir;
2298 }
2299
2300 /* The to_prepare_to_generate_core target method. */
2301
2302 static void
2303 record_btrace_prepare_to_generate_core (struct target_ops *self)
2304 {
2305 record_btrace_generating_corefile = 1;
2306 }
2307
2308 /* The to_done_generating_core target method. */
2309
2310 static void
2311 record_btrace_done_generating_core (struct target_ops *self)
2312 {
2313 record_btrace_generating_corefile = 0;
2314 }
2315
/* Initialize the record-btrace target ops.

   Fills in RECORD_BTRACE_OPS with the methods defined in this file;
   methods not set here fall through to the target beneath via the usual
   target vector defaulting.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_async = record_btrace_async;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_update_thread_list = record_btrace_update_thread_list;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
  ops->to_supports_stopped_by_sw_breakpoint
    = record_btrace_supports_stopped_by_sw_breakpoint;
  ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
  ops->to_supports_stopped_by_hw_breakpoint
    = record_btrace_supports_stopped_by_hw_breakpoint;
  ops->to_execution_direction = record_btrace_execution_direction;
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
2371
/* Start recording in BTS format.

   Implements "record btrace bts".  ARGS must be empty; FROM_TTY is
   forwarded to execute_command.  Selects the BTS trace format and opens
   the record-btrace target; on any failure the format selection is
   reset to BTRACE_FORMAT_NONE before the exception is re-thrown, so a
   failed attempt leaves no stale configuration behind.  */

static void
cmd_record_btrace_bts_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_BTS;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection so a later attempt starts clean.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2393
/* Start recording Intel(R) Processor Trace.

   Implements "record btrace pt".  ARGS must be empty; FROM_TTY is
   forwarded to execute_command.  Mirrors cmd_record_btrace_bts_start:
   selects the PT trace format, opens the record-btrace target, and on
   failure resets the format to BTRACE_FORMAT_NONE before re-throwing.  */

static void
cmd_record_btrace_pt_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* Undo the format selection so a later attempt starts clean.  */
      record_btrace_conf.format = BTRACE_FORMAT_NONE;
      throw_exception (exception);
    }
  END_CATCH
}
2415
/* Alias for "target record".

   Implements "record btrace" with no explicit format: try the PT format
   first and, if opening the target fails, fall back to BTS.  Only when
   both formats fail is the format reset to BTRACE_FORMAT_NONE and the
   (BTS) exception propagated to the caller.  ARGS must be empty.  */

static void
cmd_record_btrace_start (char *args, int from_tty)
{
  if (args != NULL && *args != 0)
    error (_("Invalid argument."));

  record_btrace_conf.format = BTRACE_FORMAT_PT;

  TRY
    {
      execute_command ("target record-btrace", from_tty);
    }
  CATCH (exception, RETURN_MASK_ALL)
    {
      /* PT is not available; retry with the BTS format.  */
      record_btrace_conf.format = BTRACE_FORMAT_BTS;

      TRY
	{
	  execute_command ("target record-btrace", from_tty);
	}
      CATCH (exception, RETURN_MASK_ALL)
	{
	  /* Neither format worked; leave no stale configuration.  */
	  record_btrace_conf.format = BTRACE_FORMAT_NONE;
	  throw_exception (exception);
	}
      END_CATCH
    }
  END_CATCH
}
2447
/* The "set record btrace" command.

   Prefix command with no action of its own: when given without a
   subcommand it lists the current values of its sub-settings via
   cmd_show_list.  ARGS is unused.  */

static void
cmd_set_record_btrace (char *args, int from_tty)
{
  cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
}
2455
/* The "show record btrace" command.

   Prefix command: list the current values of all "record btrace"
   sub-settings.  ARGS is unused.  */

static void
cmd_show_record_btrace (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
}
2463
2464 /* The "show record btrace replay-memory-access" command. */
2465
2466 static void
2467 cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2468 struct cmd_list_element *c, const char *value)
2469 {
2470 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2471 replay_memory_access);
2472 }
2473
/* The "set record btrace bts" command.

   Prefix command with no action of its own: print a usage note and the
   list of available subcommands.  ARGS is unused.  */

static void
cmd_set_record_btrace_bts (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace bts\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
	     all_commands, gdb_stdout);
}
2484
/* The "show record btrace bts" command.

   Prefix command: list the current values of all "record btrace bts"
   sub-settings.  ARGS is unused.  */

static void
cmd_show_record_btrace_bts (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
}
2492
/* The "set record btrace pt" command.

   Prefix command with no action of its own: print a usage note and the
   list of available subcommands.  ARGS is unused.  */

static void
cmd_set_record_btrace_pt (char *args, int from_tty)
{
  printf_unfiltered (_("\"set record btrace pt\" must be followed "
		       "by an appropriate subcommand.\n"));
  help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
	     all_commands, gdb_stdout);
}
2503
/* The "show record btrace pt" command.

   Prefix command: list the current values of all "record btrace pt"
   sub-settings.  ARGS is unused.  */

static void
cmd_show_record_btrace_pt (char *args, int from_tty)
{
  cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
}
2511
/* The "record bts buffer-size" show value function.

   Show-value callback: print VALUE (the formatted setting provided by
   the command framework) to FILE.  C is unused.  */

static void
show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
				   struct cmd_list_element *c,
				   const char *value)
{
  fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
		    value);
}
2522
/* The "record pt buffer-size" show value function.

   Show-value callback: print VALUE (the formatted setting provided by
   the command framework) to FILE.  C is unused.  */

static void
show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
				  struct cmd_list_element *c,
				  const char *value)
{
  fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
		    value);
}
2533
/* Forward declaration to silence -Wmissing-prototypes for the
   auto-called module initializer.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.

   Register the "record btrace" command family (start commands, set/show
   prefixes and their settings), initialize and register the
   record-btrace target, create the branch-frame cache, and set the
   default trace buffer sizes.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" (alias "record b") starts recording in the default
     format; see cmd_record_btrace_start for the PT-then-BTS fallback.  */
  add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
		  _("Start branch trace recording."), &record_btrace_cmdlist,
		  "record btrace ", 0, &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* Explicit-format start commands: "record btrace bts" / "record bts".  */
  add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
	   _("\
Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
The processor stores a from/to record for each branch into a cyclic buffer.\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);

  /* "record btrace pt" / "record pt".  */
  add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
	   _("\
Start branch trace recording in Intel(R) Processor Trace format.\n\n\
This format may not be available on all processors."),
	   &record_btrace_cmdlist);
  add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefixes for the settings below.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  /* "set/show record btrace bts" prefixes and buffer-size setting.  */
  add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
		  _("Set record btrace bts options"),
		  &set_record_btrace_bts_cmdlist,
		  "set record btrace bts ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
		  _("Show record btrace bts options"),
		  &show_record_btrace_bts_cmdlist,
		  "show record btrace bts ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.bts.size,
			    _("Set the record/replay bts buffer size."),
			    _("Show the record/replay bts buffer size."), _("\
When starting recording request a trace buffer of this size.  \
The actual buffer size may differ from the requested size.  \
Use \"info record\" to see the actual buffer size.\n\n\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution trace.\n\n\
The trace buffer size may not be changed while recording."), NULL,
			    show_record_bts_buffer_size_value,
			    &set_record_btrace_bts_cmdlist,
			    &show_record_btrace_bts_cmdlist);

  /* "set/show record btrace pt" prefixes and buffer-size setting.  */
  add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
		  _("Set record btrace pt options"),
		  &set_record_btrace_pt_cmdlist,
		  "set record btrace pt ", 0, &set_record_btrace_cmdlist);

  add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
		  _("Show record btrace pt options"),
		  &show_record_btrace_pt_cmdlist,
		  "show record btrace pt ", 0, &show_record_btrace_cmdlist);

  add_setshow_uinteger_cmd ("buffer-size", no_class,
			    &record_btrace_conf.pt.size,
			    _("Set the record/replay pt buffer size."),
			    _("Show the record/replay pt buffer size."), _("\
Bigger buffers allow longer recording but also take more time to process \
the recorded execution.\n\
The actual buffer size may differ from the requested size.  Use \"info record\" \
to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
			    &set_record_btrace_pt_cmdlist,
			    &show_record_btrace_pt_cmdlist);

  /* Fill in and register the target vector.  */
  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Hash table caching branch frame ids; see bfcache_hash/bfcache_eq
     defined earlier in this file.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);

  /* Default requested trace buffer sizes (bytes).  */
  record_btrace_conf.bts.size = 64 * 1024;
  record_btrace_conf.pt.size = 16 * 1024;
}