1/* Branch trace support for GDB, the GNU debugger.
2
e2882c85 3 Copyright (C) 2013-2018 Free Software Foundation, Inc.
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
c0272db5 24#include "record-btrace.h"
25#include "gdbthread.h"
26#include "target.h"
27#include "gdbcmd.h"
28#include "disasm.h"
29#include "observer.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
39#include "event-loop.h"
40#include "inf-loop.h"
e3cfc1c7 41#include "vec.h"
325fac50 42#include <algorithm>
43
44/* The target_ops of record-btrace. */
45static struct target_ops record_btrace_ops;
46
47/* A new thread observer enabling branch tracing for the new thread. */
48static struct observer *record_btrace_thread_observer;
49
50/* Memory access types used in set/show record btrace replay-memory-access. */
51static const char replay_memory_access_read_only[] = "read-only";
52static const char replay_memory_access_read_write[] = "read-write";
53static const char *const replay_memory_access_types[] =
54{
55 replay_memory_access_read_only,
56 replay_memory_access_read_write,
57 NULL
58};
59
60/* The currently allowed replay memory access type. */
61static const char *replay_memory_access = replay_memory_access_read_only;
62
63/* Command lists for "set/show record btrace". */
64static struct cmd_list_element *set_record_btrace_cmdlist;
65static struct cmd_list_element *show_record_btrace_cmdlist;
 66
67/* The execution direction of the last resume we got. See record-full.c. */
68static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;
69
70/* The async event handler for reverse/replay execution. */
71static struct async_event_handler *record_btrace_async_inferior_event_handler;
72
73/* A flag indicating that we are currently generating a core file. */
74static int record_btrace_generating_corefile;
75
76/* The current branch trace configuration. */
77static struct btrace_config record_btrace_conf;
78
79/* Command list for "record btrace". */
80static struct cmd_list_element *record_btrace_cmdlist;
81
82/* Command lists for "set/show record btrace bts". */
83static struct cmd_list_element *set_record_btrace_bts_cmdlist;
84static struct cmd_list_element *show_record_btrace_bts_cmdlist;
85
86/* Command lists for "set/show record btrace pt". */
87static struct cmd_list_element *set_record_btrace_pt_cmdlist;
88static struct cmd_list_element *show_record_btrace_pt_cmdlist;
89
90/* Print a record-btrace debug message. Use do ... while (0) to avoid
91 ambiguities when used in if statements. */
92
93#define DEBUG(msg, args...) \
94 do \
95 { \
96 if (record_debug != 0) \
97 fprintf_unfiltered (gdb_stdlog, \
98 "[record-btrace] " msg "\n", ##args); \
99 } \
100 while (0)
101
102
103/* Update the branch trace for the current thread and return a pointer to its
066ce621 104 thread_info.
105
106 Throws an error if there is no thread or no trace. This function never
107 returns NULL. */
108
109static struct thread_info *
110require_btrace_thread (void)
111{
112 struct thread_info *tp;
113
114 DEBUG ("require");
115
116 tp = find_thread_ptid (inferior_ptid);
117 if (tp == NULL)
118 error (_("No thread."));
119
120 validate_registers_access ();
121
122 btrace_fetch (tp);
123
6e07b1d2 124 if (btrace_is_empty (tp))
125 error (_("No trace."));
126
127 return tp;
128}
129
130/* Update the branch trace for the current thread and return a pointer to its
131 branch trace information struct.
132
133 Throws an error if there is no thread or no trace. This function never
134 returns NULL. */
135
136static struct btrace_thread_info *
137require_btrace (void)
138{
139 struct thread_info *tp;
140
141 tp = require_btrace_thread ();
142
143 return &tp->btrace;
144}
145
146/* Enable branch tracing for one thread. Warn on errors. */
147
148static void
149record_btrace_enable_warn (struct thread_info *tp)
150{
151 TRY
152 {
153 btrace_enable (tp, &record_btrace_conf);
154 }
155 CATCH (error, RETURN_MASK_ERROR)
156 {
157 warning ("%s", error.message);
158 }
159 END_CATCH
160}
161
162/* Enable automatic tracing of new threads. */
163
164static void
165record_btrace_auto_enable (void)
166{
167 DEBUG ("attach thread observer");
168
169 record_btrace_thread_observer
170 = observer_attach_new_thread (record_btrace_enable_warn);
171}
172
173/* Disable automatic tracing of new threads. */
174
175static void
176record_btrace_auto_disable (void)
177{
 178 /* The observer may already have been detached. */
179 if (record_btrace_thread_observer == NULL)
180 return;
181
182 DEBUG ("detach thread observer");
183
184 observer_detach_new_thread (record_btrace_thread_observer);
185 record_btrace_thread_observer = NULL;
186}
187
188/* The record-btrace async event handler function. */
189
190static void
191record_btrace_handle_async_inferior_event (gdb_client_data data)
192{
193 inferior_event_handler (INF_REG_EVENT, NULL);
194}
195
196/* See record-btrace.h. */
197
198void
199record_btrace_push_target (void)
200{
201 const char *format;
202
203 record_btrace_auto_enable ();
204
205 push_target (&record_btrace_ops);
206
207 record_btrace_async_inferior_event_handler
208 = create_async_event_handler (record_btrace_handle_async_inferior_event,
209 NULL);
210 record_btrace_generating_corefile = 0;
211
212 format = btrace_format_short_string (record_btrace_conf.format);
213 observer_notify_record_changed (current_inferior (), 1, "btrace", format);
214}
215
216/* Disable btrace on a set of threads on scope exit. */
217
218struct scoped_btrace_disable
219{
220 scoped_btrace_disable () = default;
221
222 DISABLE_COPY_AND_ASSIGN (scoped_btrace_disable);
223
224 ~scoped_btrace_disable ()
225 {
226 for (thread_info *tp : m_threads)
227 btrace_disable (tp);
228 }
229
230 void add_thread (thread_info *thread)
231 {
232 m_threads.push_front (thread);
233 }
234
235 void discard ()
236 {
237 m_threads.clear ();
238 }
239
240private:
241 std::forward_list<thread_info *> m_threads;
242};
243
244/* The to_open method of target record-btrace. */
245
246static void
014f9477 247record_btrace_open (const char *args, int from_tty)
afedecd3 248{
249 /* If we fail to enable btrace for one thread, disable it for the threads for
250 which it was successfully enabled. */
251 scoped_btrace_disable btrace_disable;
252 struct thread_info *tp;
253
254 DEBUG ("open");
255
8213266a 256 record_preopen ();
257
258 if (!target_has_execution)
259 error (_("The program is not being run."));
260
261 gdb_assert (record_btrace_thread_observer == NULL);
262
034f788c 263 ALL_NON_EXITED_THREADS (tp)
5d5658a1 264 if (args == NULL || *args == 0 || number_is_in_list (args, tp->global_num))
afedecd3 265 {
f4abbc16 266 btrace_enable (tp, &record_btrace_conf);
afedecd3 267
228f1508 268 btrace_disable.add_thread (tp);
269 }
270
c0272db5 271 record_btrace_push_target ();
afedecd3 272
228f1508 273 btrace_disable.discard ();
274}
275
276/* The to_stop_recording method of target record-btrace. */
277
278static void
c6cd7c02 279record_btrace_stop_recording (struct target_ops *self)
280{
281 struct thread_info *tp;
282
283 DEBUG ("stop recording");
284
285 record_btrace_auto_disable ();
286
034f788c 287 ALL_NON_EXITED_THREADS (tp)
afedecd3
MM
288 if (tp->btrace.target != NULL)
289 btrace_disable (tp);
290}
291
292/* The to_disconnect method of target record-btrace. */
293
294static void
295record_btrace_disconnect (struct target_ops *self, const char *args,
296 int from_tty)
297{
298 struct target_ops *beneath = self->beneath;
299
300 /* Do not stop recording, just clean up GDB side. */
301 unpush_target (self);
302
303 /* Forward disconnect. */
304 beneath->to_disconnect (beneath, args, from_tty);
305}
306
307/* The to_close method of target record-btrace. */
308
309static void
de90e03d 310record_btrace_close (struct target_ops *self)
afedecd3 311{
312 struct thread_info *tp;
313
314 if (record_btrace_async_inferior_event_handler != NULL)
315 delete_async_event_handler (&record_btrace_async_inferior_event_handler);
316
317 /* Make sure automatic recording gets disabled even if we did not stop
318 recording before closing the record-btrace target. */
319 record_btrace_auto_disable ();
320
321 /* We should have already stopped recording.
322 Tear down btrace in case we have not. */
034f788c 323 ALL_NON_EXITED_THREADS (tp)
568e808b 324 btrace_teardown (tp);
325}
326
327/* The to_async method of target record-btrace. */
328
329static void
6a3753b3 330record_btrace_async (struct target_ops *ops, int enable)
b7d2e916 331{
6a3753b3 332 if (enable)
333 mark_async_event_handler (record_btrace_async_inferior_event_handler);
334 else
335 clear_async_event_handler (record_btrace_async_inferior_event_handler);
336
6a3753b3 337 ops->beneath->to_async (ops->beneath, enable);
338}
339
 340/* Adjust *SIZE and return a human-readable size suffix.  */
341
342static const char *
343record_btrace_adjust_size (unsigned int *size)
344{
345 unsigned int sz;
346
347 sz = *size;
348
349 if ((sz & ((1u << 30) - 1)) == 0)
350 {
351 *size = sz >> 30;
352 return "GB";
353 }
354 else if ((sz & ((1u << 20) - 1)) == 0)
355 {
356 *size = sz >> 20;
357 return "MB";
358 }
359 else if ((sz & ((1u << 10) - 1)) == 0)
360 {
361 *size = sz >> 10;
362 return "kB";
363 }
364 else
365 return "";
366}
367
368/* Print a BTS configuration. */
369
370static void
371record_btrace_print_bts_conf (const struct btrace_config_bts *conf)
372{
373 const char *suffix;
374 unsigned int size;
375
376 size = conf->size;
377 if (size > 0)
378 {
379 suffix = record_btrace_adjust_size (&size);
380 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
381 }
382}
383
bc504a31 384/* Print an Intel Processor Trace configuration. */
385
386static void
387record_btrace_print_pt_conf (const struct btrace_config_pt *conf)
388{
389 const char *suffix;
390 unsigned int size;
391
392 size = conf->size;
393 if (size > 0)
394 {
395 suffix = record_btrace_adjust_size (&size);
396 printf_unfiltered (_("Buffer size: %u%s.\n"), size, suffix);
397 }
398}
399
400/* Print a branch tracing configuration. */
401
402static void
403record_btrace_print_conf (const struct btrace_config *conf)
404{
405 printf_unfiltered (_("Recording format: %s.\n"),
406 btrace_format_string (conf->format));
407
408 switch (conf->format)
409 {
410 case BTRACE_FORMAT_NONE:
411 return;
412
413 case BTRACE_FORMAT_BTS:
414 record_btrace_print_bts_conf (&conf->bts);
415 return;
416
417 case BTRACE_FORMAT_PT:
418 record_btrace_print_pt_conf (&conf->pt);
419 return;
420 }
421
 422 internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
423}
424
425/* The to_info_record method of target record-btrace. */
426
427static void
630d6a4a 428record_btrace_info (struct target_ops *self)
429{
430 struct btrace_thread_info *btinfo;
f4abbc16 431 const struct btrace_config *conf;
afedecd3 432 struct thread_info *tp;
31fd9caa 433 unsigned int insns, calls, gaps;
434
435 DEBUG ("info");
436
437 tp = find_thread_ptid (inferior_ptid);
438 if (tp == NULL)
439 error (_("No thread."));
440
441 validate_registers_access ();
442
443 btinfo = &tp->btrace;
444
445 conf = btrace_conf (btinfo);
446 if (conf != NULL)
d33501a5 447 record_btrace_print_conf (conf);
 448
449 btrace_fetch (tp);
450
451 insns = 0;
452 calls = 0;
31fd9caa 453 gaps = 0;
23a7fe75 454
6e07b1d2 455 if (!btrace_is_empty (tp))
23a7fe75
MM
456 {
457 struct btrace_call_iterator call;
458 struct btrace_insn_iterator insn;
459
460 btrace_call_end (&call, btinfo);
461 btrace_call_prev (&call, 1);
5de9129b 462 calls = btrace_call_number (&call);
463
464 btrace_insn_end (&insn, btinfo);
5de9129b 465 insns = btrace_insn_number (&insn);
31fd9caa 466
467 /* If the last instruction is not a gap, it is the current instruction
468 that is not actually part of the record. */
469 if (btrace_insn_get (&insn) != NULL)
470 insns -= 1;
471
472 gaps = btinfo->ngaps;
23a7fe75 473 }
afedecd3 474
31fd9caa 475 printf_unfiltered (_("Recorded %u instructions in %u functions (%u gaps) "
476 "for thread %s (%s).\n"), insns, calls, gaps,
477 print_thread_id (tp), target_pid_to_str (tp->ptid));
478
479 if (btrace_is_replaying (tp))
480 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
481 btrace_insn_number (btinfo->replay));
482}
483
484/* Print a decode error. */
485
486static void
487btrace_ui_out_decode_error (struct ui_out *uiout, int errcode,
488 enum btrace_format format)
489{
508352a9 490 const char *errstr = btrace_decode_error (format, errcode);
31fd9caa 491
112e8700 492 uiout->text (_("["));
493 /* ERRCODE > 0 indicates notifications on BTRACE_FORMAT_PT. */
494 if (!(format == BTRACE_FORMAT_PT && errcode > 0))
31fd9caa 495 {
496 uiout->text (_("decode error ("));
497 uiout->field_int ("errcode", errcode);
498 uiout->text (_("): "));
31fd9caa 499 }
500 uiout->text (errstr);
501 uiout->text (_("]\n"));
502}
503
504/* Print an unsigned int. */
505
506static void
507ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
508{
112e8700 509 uiout->field_fmt (fld, "%u", val);
510}
511
512/* A range of source lines. */
513
514struct btrace_line_range
515{
516 /* The symtab this line is from. */
517 struct symtab *symtab;
518
519 /* The first line (inclusive). */
520 int begin;
521
522 /* The last line (exclusive). */
523 int end;
524};
525
526/* Construct a line range. */
527
528static struct btrace_line_range
529btrace_mk_line_range (struct symtab *symtab, int begin, int end)
530{
531 struct btrace_line_range range;
532
533 range.symtab = symtab;
534 range.begin = begin;
535 range.end = end;
536
537 return range;
538}
539
540/* Add a line to a line range. */
541
542static struct btrace_line_range
543btrace_line_range_add (struct btrace_line_range range, int line)
544{
545 if (range.end <= range.begin)
546 {
547 /* This is the first entry. */
548 range.begin = line;
549 range.end = line + 1;
550 }
551 else if (line < range.begin)
552 range.begin = line;
553 else if (range.end < line)
554 range.end = line;
555
556 return range;
557}
558
559/* Return non-zero if RANGE is empty, zero otherwise. */
560
561static int
562btrace_line_range_is_empty (struct btrace_line_range range)
563{
564 return range.end <= range.begin;
565}
566
567/* Return non-zero if LHS contains RHS, zero otherwise. */
568
569static int
570btrace_line_range_contains_range (struct btrace_line_range lhs,
571 struct btrace_line_range rhs)
572{
573 return ((lhs.symtab == rhs.symtab)
574 && (lhs.begin <= rhs.begin)
575 && (rhs.end <= lhs.end));
576}
577
578/* Find the line range associated with PC. */
579
580static struct btrace_line_range
581btrace_find_line_range (CORE_ADDR pc)
582{
583 struct btrace_line_range range;
584 struct linetable_entry *lines;
585 struct linetable *ltable;
586 struct symtab *symtab;
587 int nlines, i;
588
589 symtab = find_pc_line_symtab (pc);
590 if (symtab == NULL)
591 return btrace_mk_line_range (NULL, 0, 0);
592
593 ltable = SYMTAB_LINETABLE (symtab);
594 if (ltable == NULL)
595 return btrace_mk_line_range (symtab, 0, 0);
596
597 nlines = ltable->nitems;
598 lines = ltable->item;
599 if (nlines <= 0)
600 return btrace_mk_line_range (symtab, 0, 0);
601
602 range = btrace_mk_line_range (symtab, 0, 0);
603 for (i = 0; i < nlines - 1; i++)
604 {
605 if ((lines[i].pc == pc) && (lines[i].line != 0))
606 range = btrace_line_range_add (range, lines[i].line);
607 }
608
609 return range;
610}
611
612/* Print source lines in LINES to UIOUT.
613
614 UI_ITEM_CHAIN is a cleanup chain for the last source line and the
615 instructions corresponding to that source line. When printing a new source
616 line, we do the cleanups for the open chain and open a new cleanup chain for
617 the new source line. If the source line range in LINES is not empty, this
618 function will leave the cleanup chain for the last printed source line open
619 so instructions can be added to it. */
620
621static void
622btrace_print_lines (struct btrace_line_range lines, struct ui_out *uiout,
3dea1ef7 623 struct cleanup **ui_item_chain, gdb_disassembly_flags flags)
f94cc897 624{
8d297bbf 625 print_source_lines_flags psl_flags;
626 int line;
627
628 psl_flags = 0;
629 if (flags & DISASSEMBLY_FILENAME)
630 psl_flags |= PRINT_SOURCE_LINES_FILENAME;
631
632 for (line = lines.begin; line < lines.end; ++line)
633 {
634 if (*ui_item_chain != NULL)
635 do_cleanups (*ui_item_chain);
636
637 *ui_item_chain
638 = make_cleanup_ui_out_tuple_begin_end (uiout, "src_and_asm_line");
639
640 print_source_lines (lines.symtab, line, line + 1, psl_flags);
641
642 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
643 }
644}
645
646/* Disassemble a section of the recorded instruction trace. */
647
648static void
23a7fe75 649btrace_insn_history (struct ui_out *uiout,
31fd9caa 650 const struct btrace_thread_info *btinfo,
23a7fe75 651 const struct btrace_insn_iterator *begin,
652 const struct btrace_insn_iterator *end,
653 gdb_disassembly_flags flags)
afedecd3 654{
f94cc897 655 struct cleanup *cleanups, *ui_item_chain;
afedecd3 656 struct gdbarch *gdbarch;
23a7fe75 657 struct btrace_insn_iterator it;
f94cc897 658 struct btrace_line_range last_lines;
afedecd3 659
660 DEBUG ("itrace (0x%x): [%u; %u)", (unsigned) flags,
661 btrace_insn_number (begin), btrace_insn_number (end));
afedecd3 662
663 flags |= DISASSEMBLY_SPECULATIVE;
664
afedecd3 665 gdbarch = target_gdbarch ();
666 last_lines = btrace_mk_line_range (NULL, 0, 0);
667
187808b0 668 cleanups = make_cleanup_ui_out_list_begin_end (uiout, "asm_insns");
669
670 /* UI_ITEM_CHAIN is a cleanup chain for the last source line and the
671 instructions corresponding to that line. */
672 ui_item_chain = NULL;
afedecd3 673
674 gdb_pretty_print_disassembler disasm (gdbarch);
675
23a7fe75 676 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 677 {
678 const struct btrace_insn *insn;
679
680 insn = btrace_insn_get (&it);
681
682 /* A NULL instruction indicates a gap in the trace. */
683 if (insn == NULL)
684 {
685 const struct btrace_config *conf;
686
687 conf = btrace_conf (btinfo);
afedecd3 688
689 /* We have trace so we must have a configuration. */
690 gdb_assert (conf != NULL);
691
692 uiout->field_fmt ("insn-number", "%u",
693 btrace_insn_number (&it));
694 uiout->text ("\t");
695
696 btrace_ui_out_decode_error (uiout, btrace_insn_get_error (&it),
697 conf->format);
698 }
699 else
700 {
f94cc897 701 struct disasm_insn dinsn;
da8c46d2 702
f94cc897 703 if ((flags & DISASSEMBLY_SOURCE) != 0)
da8c46d2 704 {
705 struct btrace_line_range lines;
706
707 lines = btrace_find_line_range (insn->pc);
708 if (!btrace_line_range_is_empty (lines)
709 && !btrace_line_range_contains_range (last_lines, lines))
710 {
711 btrace_print_lines (lines, uiout, &ui_item_chain, flags);
712 last_lines = lines;
713 }
714 else if (ui_item_chain == NULL)
715 {
716 ui_item_chain
717 = make_cleanup_ui_out_tuple_begin_end (uiout,
718 "src_and_asm_line");
719 /* No source information. */
720 make_cleanup_ui_out_list_begin_end (uiout, "line_asm_insn");
721 }
722
723 gdb_assert (ui_item_chain != NULL);
da8c46d2 724 }
da8c46d2 725
726 memset (&dinsn, 0, sizeof (dinsn));
727 dinsn.number = btrace_insn_number (&it);
728 dinsn.addr = insn->pc;
31fd9caa 729
da8c46d2 730 if ((insn->flags & BTRACE_INSN_FLAG_SPECULATIVE) != 0)
f94cc897 731 dinsn.is_speculative = 1;
da8c46d2 732
8b172ce7 733 disasm.pretty_print_insn (uiout, &dinsn, flags);
31fd9caa 734 }
afedecd3 735 }
736
737 do_cleanups (cleanups);
738}
739
740/* The to_insn_history method of target record-btrace. */
741
742static void
743record_btrace_insn_history (struct target_ops *self, int size,
744 gdb_disassembly_flags flags)
745{
746 struct btrace_thread_info *btinfo;
747 struct btrace_insn_history *history;
748 struct btrace_insn_iterator begin, end;
afedecd3 749 struct ui_out *uiout;
23a7fe75 750 unsigned int context, covered;
751
752 uiout = current_uiout;
2e783024 753 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 754 context = abs (size);
755 if (context == 0)
756 error (_("Bad record instruction-history-size."));
757
758 btinfo = require_btrace ();
759 history = btinfo->insn_history;
760 if (history == NULL)
afedecd3 761 {
07bbe694 762 struct btrace_insn_iterator *replay;
afedecd3 763
9a24775b 764 DEBUG ("insn-history (0x%x): %d", (unsigned) flags, size);
afedecd3 765
766 /* If we're replaying, we start at the replay position. Otherwise, we
767 start at the tail of the trace. */
768 replay = btinfo->replay;
769 if (replay != NULL)
770 begin = *replay;
771 else
772 btrace_insn_end (&begin, btinfo);
773
774 /* We start from here and expand in the requested direction. Then we
775 expand in the other direction, as well, to fill up any remaining
776 context. */
777 end = begin;
778 if (size < 0)
779 {
780 /* We want the current position covered, as well. */
781 covered = btrace_insn_next (&end, 1);
782 covered += btrace_insn_prev (&begin, context - covered);
783 covered += btrace_insn_next (&end, context - covered);
784 }
785 else
786 {
787 covered = btrace_insn_next (&end, context);
788 covered += btrace_insn_prev (&begin, context - covered);
789 }
790 }
791 else
792 {
793 begin = history->begin;
794 end = history->end;
afedecd3 795
9a24775b 796 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", (unsigned) flags, size,
23a7fe75 797 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 798
799 if (size < 0)
800 {
801 end = begin;
802 covered = btrace_insn_prev (&begin, context);
803 }
804 else
805 {
806 begin = end;
807 covered = btrace_insn_next (&end, context);
808 }
809 }
810
23a7fe75 811 if (covered > 0)
31fd9caa 812 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
813 else
814 {
815 if (size < 0)
816 printf_unfiltered (_("At the start of the branch trace record.\n"));
817 else
818 printf_unfiltered (_("At the end of the branch trace record.\n"));
819 }
afedecd3 820
23a7fe75 821 btrace_set_insn_history (btinfo, &begin, &end);
822}
823
824/* The to_insn_history_range method of target record-btrace. */
825
826static void
4e99c6b7 827record_btrace_insn_history_range (struct target_ops *self,
828 ULONGEST from, ULONGEST to,
829 gdb_disassembly_flags flags)
830{
831 struct btrace_thread_info *btinfo;
23a7fe75 832 struct btrace_insn_iterator begin, end;
afedecd3 833 struct ui_out *uiout;
834 unsigned int low, high;
835 int found;
836
837 uiout = current_uiout;
2e783024 838 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
839 low = from;
840 high = to;
afedecd3 841
9a24775b 842 DEBUG ("insn-history (0x%x): [%u; %u)", (unsigned) flags, low, high);
843
844 /* Check for wrap-arounds. */
23a7fe75 845 if (low != from || high != to)
846 error (_("Bad range."));
847
0688d04e 848 if (high < low)
849 error (_("Bad range."));
850
23a7fe75 851 btinfo = require_btrace ();
afedecd3 852
853 found = btrace_find_insn_by_number (&begin, btinfo, low);
854 if (found == 0)
855 error (_("Range out of bounds."));
afedecd3 856
857 found = btrace_find_insn_by_number (&end, btinfo, high);
858 if (found == 0)
859 {
860 /* Silently truncate the range. */
861 btrace_insn_end (&end, btinfo);
862 }
863 else
864 {
865 /* We want both begin and end to be inclusive. */
866 btrace_insn_next (&end, 1);
867 }
afedecd3 868
31fd9caa 869 btrace_insn_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 870 btrace_set_insn_history (btinfo, &begin, &end);
871}
872
873/* The to_insn_history_from method of target record-btrace. */
874
875static void
9abc3ff3 876record_btrace_insn_history_from (struct target_ops *self,
877 ULONGEST from, int size,
878 gdb_disassembly_flags flags)
879{
880 ULONGEST begin, end, context;
881
882 context = abs (size);
883 if (context == 0)
884 error (_("Bad record instruction-history-size."));
885
886 if (size < 0)
887 {
888 end = from;
889
890 if (from < context)
891 begin = 0;
892 else
0688d04e 893 begin = from - context + 1;
894 }
895 else
896 {
897 begin = from;
0688d04e 898 end = from + context - 1;
899
900 /* Check for wrap-around. */
901 if (end < begin)
902 end = ULONGEST_MAX;
903 }
904
4e99c6b7 905 record_btrace_insn_history_range (self, begin, end, flags);
906}
907
908/* Print the instruction number range for a function call history line. */
909
910static void
911btrace_call_history_insn_range (struct ui_out *uiout,
912 const struct btrace_function *bfun)
afedecd3 913{
914 unsigned int begin, end, size;
915
0860c437 916 size = bfun->insn.size ();
7acbe133 917 gdb_assert (size > 0);
afedecd3 918
23a7fe75 919 begin = bfun->insn_offset;
7acbe133 920 end = begin + size - 1;
afedecd3 921
23a7fe75 922 ui_out_field_uint (uiout, "insn begin", begin);
112e8700 923 uiout->text (",");
23a7fe75 924 ui_out_field_uint (uiout, "insn end", end);
925}
926
927/* Compute the lowest and highest source line for the instructions in BFUN
928 and return them in PBEGIN and PEND.
929 Ignore instructions that can't be mapped to BFUN, e.g. instructions that
930 result from inlining or macro expansion. */
931
932static void
933btrace_compute_src_line_range (const struct btrace_function *bfun,
934 int *pbegin, int *pend)
935{
936 struct symtab *symtab;
937 struct symbol *sym;
938 int begin, end;
939
940 begin = INT_MAX;
941 end = INT_MIN;
942
943 sym = bfun->sym;
944 if (sym == NULL)
945 goto out;
946
947 symtab = symbol_symtab (sym);
948
0860c437 949 for (const btrace_insn &insn : bfun->insn)
950 {
951 struct symtab_and_line sal;
952
0860c437 953 sal = find_pc_line (insn.pc, 0);
954 if (sal.symtab != symtab || sal.line == 0)
955 continue;
956
957 begin = std::min (begin, sal.line);
958 end = std::max (end, sal.line);
959 }
960
961 out:
962 *pbegin = begin;
963 *pend = end;
964}
965
966/* Print the source line information for a function call history line. */
967
968static void
969btrace_call_history_src_line (struct ui_out *uiout,
970 const struct btrace_function *bfun)
971{
972 struct symbol *sym;
23a7fe75 973 int begin, end;
974
975 sym = bfun->sym;
976 if (sym == NULL)
977 return;
978
112e8700 979 uiout->field_string ("file",
08be3fe3 980 symtab_to_filename_for_display (symbol_symtab (sym)));
afedecd3 981
ce0dfbea 982 btrace_compute_src_line_range (bfun, &begin, &end);
23a7fe75 983 if (end < begin)
984 return;
985
986 uiout->text (":");
987 uiout->field_int ("min line", begin);
afedecd3 988
23a7fe75 989 if (end == begin)
990 return;
991
992 uiout->text (",");
993 uiout->field_int ("max line", end);
994}
995
996/* Get the name of a branch trace function. */
997
998static const char *
999btrace_get_bfun_name (const struct btrace_function *bfun)
1000{
1001 struct minimal_symbol *msym;
1002 struct symbol *sym;
1003
1004 if (bfun == NULL)
1005 return "??";
1006
1007 msym = bfun->msym;
1008 sym = bfun->sym;
1009
1010 if (sym != NULL)
1011 return SYMBOL_PRINT_NAME (sym);
1012 else if (msym != NULL)
efd66ac6 1013 return MSYMBOL_PRINT_NAME (msym);
1014 else
1015 return "??";
1016}
1017
1018/* Disassemble a section of the recorded function trace. */
1019
1020static void
23a7fe75 1021btrace_call_history (struct ui_out *uiout,
8710b709 1022 const struct btrace_thread_info *btinfo,
1023 const struct btrace_call_iterator *begin,
1024 const struct btrace_call_iterator *end,
8d297bbf 1025 int int_flags)
afedecd3 1026{
23a7fe75 1027 struct btrace_call_iterator it;
8d297bbf 1028 record_print_flags flags = (enum record_print_flag) int_flags;
afedecd3 1029
8d297bbf 1030 DEBUG ("ftrace (0x%x): [%u; %u)", int_flags, btrace_call_number (begin),
23a7fe75 1031 btrace_call_number (end));
afedecd3 1032
23a7fe75 1033 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 1034 {
1035 const struct btrace_function *bfun;
1036 struct minimal_symbol *msym;
1037 struct symbol *sym;
1038
1039 bfun = btrace_call_get (&it);
23a7fe75 1040 sym = bfun->sym;
0b722aec 1041 msym = bfun->msym;
23a7fe75 1042
afedecd3 1043 /* Print the function index. */
23a7fe75 1044 ui_out_field_uint (uiout, "index", bfun->number);
112e8700 1045 uiout->text ("\t");
afedecd3 1046
1047 /* Indicate gaps in the trace. */
1048 if (bfun->errcode != 0)
1049 {
1050 const struct btrace_config *conf;
1051
1052 conf = btrace_conf (btinfo);
1053
1054 /* We have trace so we must have a configuration. */
1055 gdb_assert (conf != NULL);
1056
1057 btrace_ui_out_decode_error (uiout, bfun->errcode, conf->format);
1058
1059 continue;
1060 }
1061
1062 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
1063 {
1064 int level = bfun->level + btinfo->level, i;
1065
1066 for (i = 0; i < level; ++i)
112e8700 1067 uiout->text (" ");
1068 }
1069
1070 if (sym != NULL)
112e8700 1071 uiout->field_string ("function", SYMBOL_PRINT_NAME (sym));
8710b709 1072 else if (msym != NULL)
1073 uiout->field_string ("function", MSYMBOL_PRINT_NAME (msym));
1074 else if (!uiout->is_mi_like_p ())
1075 uiout->field_string ("function", "??");
8710b709 1076
1e038f67 1077 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 1078 {
112e8700 1079 uiout->text (_("\tinst "));
23a7fe75 1080 btrace_call_history_insn_range (uiout, bfun);
1081 }
1082
1e038f67 1083 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 1084 {
112e8700 1085 uiout->text (_("\tat "));
23a7fe75 1086 btrace_call_history_src_line (uiout, bfun);
1087 }
1088
112e8700 1089 uiout->text ("\n");
1090 }
1091}
1092
1093/* The to_call_history method of target record-btrace. */
1094
1095static void
1096record_btrace_call_history (struct target_ops *self, int size,
1097 record_print_flags flags)
1098{
1099 struct btrace_thread_info *btinfo;
1100 struct btrace_call_history *history;
1101 struct btrace_call_iterator begin, end;
afedecd3 1102 struct ui_out *uiout;
23a7fe75 1103 unsigned int context, covered;
1104
1105 uiout = current_uiout;
2e783024 1106 ui_out_emit_tuple tuple_emitter (uiout, "insn history");
afedecd3 1107 context = abs (size);
1108 if (context == 0)
1109 error (_("Bad record function-call-history-size."));
1110
1111 btinfo = require_btrace ();
1112 history = btinfo->call_history;
1113 if (history == NULL)
afedecd3 1114 {
07bbe694 1115 struct btrace_insn_iterator *replay;
afedecd3 1116
0cb7c7b0 1117 DEBUG ("call-history (0x%x): %d", (int) flags, size);
afedecd3 1118
1119 /* If we're replaying, we start at the replay position. Otherwise, we
1120 start at the tail of the trace. */
1121 replay = btinfo->replay;
1122 if (replay != NULL)
1123 {
07bbe694 1124 begin.btinfo = btinfo;
a0f1b963 1125 begin.index = replay->call_index;
1126 }
1127 else
1128 btrace_call_end (&begin, btinfo);
1129
1130 /* We start from here and expand in the requested direction. Then we
1131 expand in the other direction, as well, to fill up any remaining
1132 context. */
1133 end = begin;
1134 if (size < 0)
1135 {
1136 /* We want the current position covered, as well. */
1137 covered = btrace_call_next (&end, 1);
1138 covered += btrace_call_prev (&begin, context - covered);
1139 covered += btrace_call_next (&end, context - covered);
1140 }
1141 else
1142 {
1143 covered = btrace_call_next (&end, context);
 1144 covered += btrace_call_prev (&begin, context - covered);
1145 }
1146 }
1147 else
1148 {
1149 begin = history->begin;
1150 end = history->end;
afedecd3 1151
0cb7c7b0 1152 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", (int) flags, size,
23a7fe75 1153 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 1154
1155 if (size < 0)
1156 {
1157 end = begin;
1158 covered = btrace_call_prev (&begin, context);
1159 }
1160 else
1161 {
1162 begin = end;
1163 covered = btrace_call_next (&end, context);
1164 }
1165 }
1166
23a7fe75 1167 if (covered > 0)
8710b709 1168 btrace_call_history (uiout, btinfo, &begin, &end, flags);
1169 else
1170 {
1171 if (size < 0)
1172 printf_unfiltered (_("At the start of the branch trace record.\n"));
1173 else
1174 printf_unfiltered (_("At the end of the branch trace record.\n"));
1175 }
afedecd3 1176
23a7fe75 1177 btrace_set_call_history (btinfo, &begin, &end);
1178}
1179
1180/* The to_call_history_range method of target record-btrace. */
1181
1182static void
f0d960ea 1183record_btrace_call_history_range (struct target_ops *self,
8d297bbf 1184 ULONGEST from, ULONGEST to,
0cb7c7b0 1185 record_print_flags flags)
1186{
1187 struct btrace_thread_info *btinfo;
23a7fe75 1188 struct btrace_call_iterator begin, end;
afedecd3 1189 struct ui_out *uiout;
1190 unsigned int low, high;
1191 int found;
1192
1193 uiout = current_uiout;
2e783024 1194 ui_out_emit_tuple tuple_emitter (uiout, "func history");
1195 low = from;
1196 high = to;
afedecd3 1197
0cb7c7b0 1198 DEBUG ("call-history (0x%x): [%u; %u)", (int) flags, low, high);
1199
1200 /* Check for wrap-arounds. */
23a7fe75 1201 if (low != from || high != to)
1202 error (_("Bad range."));
1203
0688d04e 1204 if (high < low)
1205 error (_("Bad range."));
1206
23a7fe75 1207 btinfo = require_btrace ();
afedecd3 1208
1209 found = btrace_find_call_by_number (&begin, btinfo, low);
1210 if (found == 0)
1211 error (_("Range out of bounds."));
afedecd3 1212
1213 found = btrace_find_call_by_number (&end, btinfo, high);
1214 if (found == 0)
1215 {
1216 /* Silently truncate the range. */
1217 btrace_call_end (&end, btinfo);
1218 }
1219 else
1220 {
1221 /* We want both begin and end to be inclusive. */
1222 btrace_call_next (&end, 1);
1223 }
afedecd3 1224
8710b709 1225 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 1226 btrace_set_call_history (btinfo, &begin, &end);
1227}
1228
1229/* The to_call_history_from method of target record-btrace. */
1230
1231static void
ec0aea04 1232record_btrace_call_history_from (struct target_ops *self,
8d297bbf 1233 ULONGEST from, int size,
0cb7c7b0 1234 record_print_flags flags)
1235{
1236 ULONGEST begin, end, context;
1237
1238 context = abs (size);
1239 if (context == 0)
1240 error (_("Bad record function-call-history-size."));
1241
1242 if (size < 0)
1243 {
1244 end = from;
1245
1246 if (from < context)
1247 begin = 0;
1248 else
0688d04e 1249 begin = from - context + 1;
1250 }
1251 else
1252 {
1253 begin = from;
0688d04e 1254 end = from + context - 1;
1255
1256 /* Check for wrap-around. */
1257 if (end < begin)
1258 end = ULONGEST_MAX;
1259 }
1260
f0d960ea 1261 record_btrace_call_history_range (self, begin, end, flags);
1262}
1263
1264/* The to_record_method method of target record-btrace. */
1265
1266static enum record_method
1267record_btrace_record_method (struct target_ops *self, ptid_t ptid)
1268{
b158a20f
TW
1269 struct thread_info * const tp = find_thread_ptid (ptid);
1270
1271 if (tp == NULL)
1272 error (_("No thread."));
1273
1274 if (tp->btrace.target == NULL)
1275 return RECORD_METHOD_NONE;
1276
1277 return RECORD_METHOD_BTRACE;
1278}
1279
1280/* The to_record_is_replaying method of target record-btrace. */
1281
1282static int
a52eab48 1283record_btrace_is_replaying (struct target_ops *self, ptid_t ptid)
1284{
1285 struct thread_info *tp;
1286
034f788c 1287 ALL_NON_EXITED_THREADS (tp)
a52eab48 1288 if (ptid_match (tp->ptid, ptid) && btrace_is_replaying (tp))
1289 return 1;
1290
1291 return 0;
1292}
1293
1294/* The to_record_will_replay method of target record-btrace. */
1295
1296static int
1297record_btrace_will_replay (struct target_ops *self, ptid_t ptid, int dir)
1298{
1299 return dir == EXEC_REVERSE || record_btrace_is_replaying (self, ptid);
1300}
1301
1302/* The to_xfer_partial method of target record-btrace. */
1303
9b409511 1304static enum target_xfer_status
1305record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
1306 const char *annex, gdb_byte *readbuf,
1307 const gdb_byte *writebuf, ULONGEST offset,
9b409511 1308 ULONGEST len, ULONGEST *xfered_len)
633785ff 1309{
633785ff 1310 /* Filter out requests that don't make sense during replay. */
67b5c0c1 1311 if (replay_memory_access == replay_memory_access_read_only
aef92902 1312 && !record_btrace_generating_corefile
4d10e986 1313 && record_btrace_is_replaying (ops, inferior_ptid))
1314 {
1315 switch (object)
1316 {
1317 case TARGET_OBJECT_MEMORY:
1318 {
1319 struct target_section *section;
1320
1321 /* We do not allow writing memory in general. */
1322 if (writebuf != NULL)
1323 {
1324 *xfered_len = len;
bc113b4e 1325 return TARGET_XFER_UNAVAILABLE;
9b409511 1326 }
1327
1328 /* We allow reading readonly memory. */
1329 section = target_section_by_addr (ops, offset);
1330 if (section != NULL)
1331 {
1332 /* Check if the section we found is readonly. */
1333 if ((bfd_get_section_flags (section->the_bfd_section->owner,
1334 section->the_bfd_section)
1335 & SEC_READONLY) != 0)
1336 {
1337 /* Truncate the request to fit into this section. */
325fac50 1338 len = std::min (len, section->endaddr - offset);
1339 break;
1340 }
1341 }
1342
9b409511 1343 *xfered_len = len;
bc113b4e 1344 return TARGET_XFER_UNAVAILABLE;
1345 }
1346 }
1347 }
1348
1349 /* Forward the request. */
1350 ops = ops->beneath;
1351 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
1352 offset, len, xfered_len);
1353}
1354
1355/* The to_insert_breakpoint method of target record-btrace. */
1356
1357static int
1358record_btrace_insert_breakpoint (struct target_ops *ops,
1359 struct gdbarch *gdbarch,
1360 struct bp_target_info *bp_tgt)
1361{
1362 const char *old;
1363 int ret;
1364
1365 /* Inserting breakpoints requires accessing memory. Allow it for the
1366 duration of this function. */
1367 old = replay_memory_access;
1368 replay_memory_access = replay_memory_access_read_write;
1369
1370 ret = 0;
1371 TRY
1372 {
1373 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
1374 }
1375 CATCH (except, RETURN_MASK_ALL)
1376 {
6c63c96a 1377 replay_memory_access = old;
1378 throw_exception (except);
1379 }
1380 END_CATCH
6c63c96a 1381 replay_memory_access = old;
1382
1383 return ret;
1384}
1385
1386/* The to_remove_breakpoint method of target record-btrace. */
1387
1388static int
1389record_btrace_remove_breakpoint (struct target_ops *ops,
1390 struct gdbarch *gdbarch,
1391 struct bp_target_info *bp_tgt,
1392 enum remove_bp_reason reason)
633785ff 1393{
1394 const char *old;
1395 int ret;
1396
1397 /* Removing breakpoints requires accessing memory. Allow it for the
1398 duration of this function. */
1399 old = replay_memory_access;
1400 replay_memory_access = replay_memory_access_read_write;
1401
1402 ret = 0;
1403 TRY
1404 {
1405 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt,
1406 reason);
492d29ea 1407 }
1408 CATCH (except, RETURN_MASK_ALL)
1409 {
6c63c96a 1410 replay_memory_access = old;
1411 throw_exception (except);
1412 }
1413 END_CATCH
6c63c96a 1414 replay_memory_access = old;
1415
1416 return ret;
1417}
1418
1419/* The to_fetch_registers method of target record-btrace. */
1420
1421static void
1422record_btrace_fetch_registers (struct target_ops *ops,
1423 struct regcache *regcache, int regno)
1424{
1425 struct btrace_insn_iterator *replay;
1426 struct thread_info *tp;
1427
bcc0c096 1428 tp = find_thread_ptid (regcache_get_ptid (regcache));
1429 gdb_assert (tp != NULL);
1430
1431 replay = tp->btrace.replay;
aef92902 1432 if (replay != NULL && !record_btrace_generating_corefile)
1433 {
1434 const struct btrace_insn *insn;
1435 struct gdbarch *gdbarch;
1436 int pcreg;
1437
ac7936df 1438 gdbarch = regcache->arch ();
1439 pcreg = gdbarch_pc_regnum (gdbarch);
1440 if (pcreg < 0)
1441 return;
1442
1443 /* We can only provide the PC register. */
1444 if (regno >= 0 && regno != pcreg)
1445 return;
1446
1447 insn = btrace_insn_get (replay);
1448 gdb_assert (insn != NULL);
1449
1450 regcache_raw_supply (regcache, regno, &insn->pc);
1451 }
1452 else
1453 {
e75fdfca 1454 struct target_ops *t = ops->beneath;
1f3ef581 1455
e75fdfca 1456 t->to_fetch_registers (t, regcache, regno);
1457 }
1458}
1459
1460/* The to_store_registers method of target record-btrace. */
1461
1462static void
1463record_btrace_store_registers (struct target_ops *ops,
1464 struct regcache *regcache, int regno)
1465{
1466 struct target_ops *t;
1467
a52eab48 1468 if (!record_btrace_generating_corefile
bcc0c096 1469 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
4d10e986 1470 error (_("Cannot write registers while replaying."));
1471
1472 gdb_assert (may_write_registers != 0);
1473
1474 t = ops->beneath;
1475 t->to_store_registers (t, regcache, regno);
1476}
1477
1478/* The to_prepare_to_store method of target record-btrace. */
1479
1480static void
1481record_btrace_prepare_to_store (struct target_ops *ops,
1482 struct regcache *regcache)
1483{
1484 struct target_ops *t;
1485
a52eab48 1486 if (!record_btrace_generating_corefile
bcc0c096 1487 && record_btrace_is_replaying (ops, regcache_get_ptid (regcache)))
1488 return;
1489
1490 t = ops->beneath;
1491 t->to_prepare_to_store (t, regcache);
1492}
1493
1494/* The branch trace frame cache. */
1495
1496struct btrace_frame_cache
1497{
1498 /* The thread. */
1499 struct thread_info *tp;
1500
1501 /* The frame info. */
1502 struct frame_info *frame;
1503
1504 /* The branch trace function segment. */
1505 const struct btrace_function *bfun;
1506};
1507
1508/* A struct btrace_frame_cache hash table indexed by NEXT. */
1509
1510static htab_t bfcache;
1511
1512/* hash_f for htab_create_alloc of bfcache. */
1513
1514static hashval_t
1515bfcache_hash (const void *arg)
1516{
1517 const struct btrace_frame_cache *cache
1518 = (const struct btrace_frame_cache *) arg;
1519
1520 return htab_hash_pointer (cache->frame);
1521}
1522
1523/* eq_f for htab_create_alloc of bfcache. */
1524
1525static int
1526bfcache_eq (const void *arg1, const void *arg2)
1527{
1528 const struct btrace_frame_cache *cache1
1529 = (const struct btrace_frame_cache *) arg1;
1530 const struct btrace_frame_cache *cache2
1531 = (const struct btrace_frame_cache *) arg2;
1532
1533 return cache1->frame == cache2->frame;
1534}
1535
1536/* Create a new btrace frame cache. */
1537
1538static struct btrace_frame_cache *
1539bfcache_new (struct frame_info *frame)
1540{
1541 struct btrace_frame_cache *cache;
1542 void **slot;
1543
1544 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1545 cache->frame = frame;
1546
1547 slot = htab_find_slot (bfcache, cache, INSERT);
1548 gdb_assert (*slot == NULL);
1549 *slot = cache;
1550
1551 return cache;
1552}
1553
1554/* Extract the branch trace function from a branch trace frame. */
1555
1556static const struct btrace_function *
1557btrace_get_frame_function (struct frame_info *frame)
1558{
1559 const struct btrace_frame_cache *cache;
0b722aec
MM
1560 struct btrace_frame_cache pattern;
1561 void **slot;
1562
1563 pattern.frame = frame;
1564
1565 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1566 if (slot == NULL)
1567 return NULL;
1568
19ba03f4 1569 cache = (const struct btrace_frame_cache *) *slot;
1570 return cache->bfun;
1571}
1572
1573/* Implement stop_reason method for record_btrace_frame_unwind. */
1574
1575static enum unwind_stop_reason
1576record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1577 void **this_cache)
1578{
1579 const struct btrace_frame_cache *cache;
1580 const struct btrace_function *bfun;
1581
19ba03f4 1582 cache = (const struct btrace_frame_cache *) *this_cache;
1583 bfun = cache->bfun;
1584 gdb_assert (bfun != NULL);
1585
42bfe59e 1586 if (bfun->up == 0)
1587 return UNWIND_UNAVAILABLE;
1588
1589 return UNWIND_NO_REASON;
1590}
1591
1592/* Implement this_id method for record_btrace_frame_unwind. */
1593
1594static void
1595record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1596 struct frame_id *this_id)
1597{
1598 const struct btrace_frame_cache *cache;
1599 const struct btrace_function *bfun;
4aeb0dfc 1600 struct btrace_call_iterator it;
1601 CORE_ADDR code, special;
1602
19ba03f4 1603 cache = (const struct btrace_frame_cache *) *this_cache;
1604
1605 bfun = cache->bfun;
1606 gdb_assert (bfun != NULL);
1607
1608 while (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->prev) != 0)
1609 bfun = btrace_call_get (&it);
1610
1611 code = get_frame_func (this_frame);
1612 special = bfun->number;
1613
1614 *this_id = frame_id_build_unavailable_stack_special (code, special);
1615
1616 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1617 btrace_get_bfun_name (cache->bfun),
1618 core_addr_to_string_nz (this_id->code_addr),
1619 core_addr_to_string_nz (this_id->special_addr));
1620}
1621
1622/* Implement prev_register method for record_btrace_frame_unwind. */
1623
1624static struct value *
1625record_btrace_frame_prev_register (struct frame_info *this_frame,
1626 void **this_cache,
1627 int regnum)
1628{
1629 const struct btrace_frame_cache *cache;
1630 const struct btrace_function *bfun, *caller;
42bfe59e 1631 struct btrace_call_iterator it;
1632 struct gdbarch *gdbarch;
1633 CORE_ADDR pc;
1634 int pcreg;
1635
1636 gdbarch = get_frame_arch (this_frame);
1637 pcreg = gdbarch_pc_regnum (gdbarch);
1638 if (pcreg < 0 || regnum != pcreg)
1639 throw_error (NOT_AVAILABLE_ERROR,
1640 _("Registers are not available in btrace record history"));
1641
19ba03f4 1642 cache = (const struct btrace_frame_cache *) *this_cache;
1643 bfun = cache->bfun;
1644 gdb_assert (bfun != NULL);
1645
42bfe59e 1646 if (btrace_find_call_by_number (&it, &cache->tp->btrace, bfun->up) == 0)
0b722aec
MM
1647 throw_error (NOT_AVAILABLE_ERROR,
1648 _("No caller in btrace record history"));
1649
1650 caller = btrace_call_get (&it);
1651
0b722aec 1652 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
0860c437 1653 pc = caller->insn.front ().pc;
1654 else
1655 {
0860c437 1656 pc = caller->insn.back ().pc;
1657 pc += gdb_insn_length (gdbarch, pc);
1658 }
1659
1660 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1661 btrace_get_bfun_name (bfun), bfun->level,
1662 core_addr_to_string_nz (pc));
1663
1664 return frame_unwind_got_address (this_frame, regnum, pc);
1665}
1666
1667/* Implement sniffer method for record_btrace_frame_unwind. */
1668
1669static int
1670record_btrace_frame_sniffer (const struct frame_unwind *self,
1671 struct frame_info *this_frame,
1672 void **this_cache)
1673{
1674 const struct btrace_function *bfun;
1675 struct btrace_frame_cache *cache;
cecac1ab 1676 struct thread_info *tp;
0b722aec 1677 struct frame_info *next;
1678
1679 /* THIS_FRAME does not contain a reference to its thread. */
1680 tp = find_thread_ptid (inferior_ptid);
1681 gdb_assert (tp != NULL);
1682
1683 bfun = NULL;
1684 next = get_next_frame (this_frame);
1685 if (next == NULL)
1686 {
1687 const struct btrace_insn_iterator *replay;
1688
1689 replay = tp->btrace.replay;
1690 if (replay != NULL)
08c3f6d2 1691 bfun = &replay->btinfo->functions[replay->call_index];
1692 }
1693 else
1694 {
1695 const struct btrace_function *callee;
42bfe59e 1696 struct btrace_call_iterator it;
1697
1698 callee = btrace_get_frame_function (next);
1699 if (callee == NULL || (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) != 0)
1700 return 0;
1701
1702 if (btrace_find_call_by_number (&it, &tp->btrace, callee->up) == 0)
1703 return 0;
1704
1705 bfun = btrace_call_get (&it);
1706 }
1707
1708 if (bfun == NULL)
1709 return 0;
1710
1711 DEBUG ("[frame] sniffed frame for %s on level %d",
1712 btrace_get_bfun_name (bfun), bfun->level);
1713
1714 /* This is our frame. Initialize the frame cache. */
1715 cache = bfcache_new (this_frame);
1716 cache->tp = tp;
1717 cache->bfun = bfun;
1718
1719 *this_cache = cache;
1720 return 1;
1721}
1722
1723/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1724
1725static int
1726record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1727 struct frame_info *this_frame,
1728 void **this_cache)
1729{
1730 const struct btrace_function *bfun, *callee;
1731 struct btrace_frame_cache *cache;
42bfe59e 1732 struct btrace_call_iterator it;
0b722aec 1733 struct frame_info *next;
42bfe59e 1734 struct thread_info *tinfo;
1735
1736 next = get_next_frame (this_frame);
1737 if (next == NULL)
1738 return 0;
1739
1740 callee = btrace_get_frame_function (next);
1741 if (callee == NULL)
1742 return 0;
1743
1744 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1745 return 0;
1746
1747 tinfo = find_thread_ptid (inferior_ptid);
1748 if (btrace_find_call_by_number (&it, &tinfo->btrace, callee->up) == 0)
1749 return 0;
1750
1751 bfun = btrace_call_get (&it);
1752
1753 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1754 btrace_get_bfun_name (bfun), bfun->level);
1755
1756 /* This is our frame. Initialize the frame cache. */
1757 cache = bfcache_new (this_frame);
42bfe59e 1758 cache->tp = tinfo;
1759 cache->bfun = bfun;
1760
1761 *this_cache = cache;
1762 return 1;
1763}
1764
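/* Implement the dealloc_cache method for record_btrace_frame_unwind.
   Remove the frame's entry from the BFCACHE hash table.  */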
1765static void
1766record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1767{
1768 struct btrace_frame_cache *cache;
1769 void **slot;
1770
19ba03f4 1771 cache = (struct btrace_frame_cache *) this_cache;
1772
1773 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1774 gdb_assert (slot != NULL);
1775
1776 htab_remove_elt (bfcache, cache);
1777}
1778
 1779/* btrace recording does not store previous memory content, nor the stack
 1780 frames' content.  Any unwinding would return erroneous results as the stack
 1781 contents no longer match the changed PC value restored from history.
1782 Therefore this unwinder reports any possibly unwound registers as
1783 <unavailable>. */
1784
0b722aec 1785const struct frame_unwind record_btrace_frame_unwind =
1786{
1787 NORMAL_FRAME,
1788 record_btrace_frame_unwind_stop_reason,
1789 record_btrace_frame_this_id,
1790 record_btrace_frame_prev_register,
1791 NULL,
1792 record_btrace_frame_sniffer,
1793 record_btrace_frame_dealloc_cache
1794};
1795
1796const struct frame_unwind record_btrace_tailcall_frame_unwind =
1797{
1798 TAILCALL_FRAME,
1799 record_btrace_frame_unwind_stop_reason,
1800 record_btrace_frame_this_id,
1801 record_btrace_frame_prev_register,
1802 NULL,
1803 record_btrace_tailcall_frame_sniffer,
1804 record_btrace_frame_dealloc_cache
cecac1ab 1805};
b2f4cfde 1806
1807/* Implement the to_get_unwinder method. */
1808
1809static const struct frame_unwind *
1810record_btrace_to_get_unwinder (struct target_ops *self)
1811{
1812 return &record_btrace_frame_unwind;
1813}
1814
1815/* Implement the to_get_tailcall_unwinder method. */
1816
1817static const struct frame_unwind *
1818record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1819{
1820 return &record_btrace_tailcall_frame_unwind;
1821}
1822
1823/* Return a human-readable string for FLAG. */
1824
1825static const char *
1826btrace_thread_flag_to_str (enum btrace_thread_flag flag)
1827{
1828 switch (flag)
1829 {
1830 case BTHR_STEP:
1831 return "step";
1832
1833 case BTHR_RSTEP:
1834 return "reverse-step";
1835
1836 case BTHR_CONT:
1837 return "cont";
1838
1839 case BTHR_RCONT:
1840 return "reverse-cont";
1841
1842 case BTHR_STOP:
1843 return "stop";
1844 }
1845
1846 return "<invalid>";
1847}
1848
1849/* Indicate that TP should be resumed according to FLAG. */
1850
1851static void
1852record_btrace_resume_thread (struct thread_info *tp,
1853 enum btrace_thread_flag flag)
1854{
1855 struct btrace_thread_info *btinfo;
1856
43792cf0 1857 DEBUG ("resuming thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1 1858 target_pid_to_str (tp->ptid), flag, btrace_thread_flag_to_str (flag));
1859
1860 btinfo = &tp->btrace;
1861
1862 /* Fetch the latest branch trace. */
1863 btrace_fetch (tp);
1864
1865 /* A resume request overwrites a preceding resume or stop request. */
1866 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
1867 btinfo->flags |= flag;
1868}
1869
1870/* Get the current frame for TP. */
1871
1872static struct frame_info *
1873get_thread_current_frame (struct thread_info *tp)
1874{
1875 struct frame_info *frame;
1876 ptid_t old_inferior_ptid;
1877 int executing;
1878
1879 /* Set INFERIOR_PTID, which is implicitly used by get_current_frame. */
1880 old_inferior_ptid = inferior_ptid;
1881 inferior_ptid = tp->ptid;
1882
1883 /* Clear the executing flag to allow changes to the current frame.
1884 We are not actually running, yet. We just started a reverse execution
1885 command or a record goto command.
1886 For the latter, EXECUTING is false and this has no effect.
1887 For the former, EXECUTING is true and we're in to_wait, about to
1888 move the thread. Since we need to recompute the stack, we temporarily
 1889 set EXECUTING to false.  */
1890 executing = is_executing (inferior_ptid);
1891 set_executing (inferior_ptid, 0);
1892
1893 frame = NULL;
1894 TRY
1895 {
1896 frame = get_current_frame ();
1897 }
1898 CATCH (except, RETURN_MASK_ALL)
1899 {
1900 /* Restore the previous execution state. */
1901 set_executing (inferior_ptid, executing);
1902
1903 /* Restore the previous inferior_ptid. */
1904 inferior_ptid = old_inferior_ptid;
1905
1906 throw_exception (except);
1907 }
1908 END_CATCH
1909
1910 /* Restore the previous execution state. */
1911 set_executing (inferior_ptid, executing);
1912
1913 /* Restore the previous inferior_ptid. */
1914 inferior_ptid = old_inferior_ptid;
1915
1916 return frame;
1917}
1918
52834460
MM
1919/* Start replaying a thread. */
1920
1921static struct btrace_insn_iterator *
1922record_btrace_start_replaying (struct thread_info *tp)
1923{
52834460
MM
1924 struct btrace_insn_iterator *replay;
1925 struct btrace_thread_info *btinfo;
52834460
MM
1926
1927 btinfo = &tp->btrace;
1928 replay = NULL;
1929
1930 /* We can't start replaying without trace. */
b54b03bd 1931 if (btinfo->functions.empty ())
52834460
MM
1932 return NULL;
1933
52834460
MM
 1934 /* GDB stores the current frame_id when stepping in order to detect steps
1935 into subroutines.
1936 Since frames are computed differently when we're replaying, we need to
1937 recompute those stored frames and fix them up so we can still detect
1938 subroutines after we started replaying. */
492d29ea 1939 TRY
52834460
MM
1940 {
1941 struct frame_info *frame;
1942 struct frame_id frame_id;
1943 int upd_step_frame_id, upd_step_stack_frame_id;
1944
1945 /* The current frame without replaying - computed via normal unwind. */
ec71cc2f 1946 frame = get_thread_current_frame (tp);
52834460
MM
1947 frame_id = get_frame_id (frame);
1948
1949 /* Check if we need to update any stepping-related frame id's. */
1950 upd_step_frame_id = frame_id_eq (frame_id,
1951 tp->control.step_frame_id);
1952 upd_step_stack_frame_id = frame_id_eq (frame_id,
1953 tp->control.step_stack_frame_id);
1954
1955 /* We start replaying at the end of the branch trace. This corresponds
1956 to the current instruction. */
8d749320 1957 replay = XNEW (struct btrace_insn_iterator);
52834460
MM
1958 btrace_insn_end (replay, btinfo);
1959
31fd9caa
MM
1960 /* Skip gaps at the end of the trace. */
1961 while (btrace_insn_get (replay) == NULL)
1962 {
1963 unsigned int steps;
1964
1965 steps = btrace_insn_prev (replay, 1);
1966 if (steps == 0)
1967 error (_("No trace."));
1968 }
1969
52834460
MM
1970 /* We're not replaying, yet. */
1971 gdb_assert (btinfo->replay == NULL);
1972 btinfo->replay = replay;
1973
1974 /* Make sure we're not using any stale registers. */
1975 registers_changed_ptid (tp->ptid);
1976
1977 /* The current frame with replaying - computed via btrace unwind. */
ec71cc2f 1978 frame = get_thread_current_frame (tp);
52834460
MM
1979 frame_id = get_frame_id (frame);
1980
1981 /* Replace stepping related frames where necessary. */
1982 if (upd_step_frame_id)
1983 tp->control.step_frame_id = frame_id;
1984 if (upd_step_stack_frame_id)
1985 tp->control.step_stack_frame_id = frame_id;
1986 }
492d29ea 1987 CATCH (except, RETURN_MASK_ALL)
52834460
MM
1988 {
1989 xfree (btinfo->replay);
1990 btinfo->replay = NULL;
1991
1992 registers_changed_ptid (tp->ptid);
1993
1994 throw_exception (except);
1995 }
492d29ea 1996 END_CATCH
52834460
MM
1997
1998 return replay;
1999}
2000
2001/* Stop replaying a thread. */
2002
2003static void
2004record_btrace_stop_replaying (struct thread_info *tp)
2005{
2006 struct btrace_thread_info *btinfo;
2007
2008 btinfo = &tp->btrace;
2009
2010 xfree (btinfo->replay);
2011 btinfo->replay = NULL;
2012
2013 /* Make sure we're not leaving any stale registers. */
2014 registers_changed_ptid (tp->ptid);
2015}
2016
e3cfc1c7
MM
2017/* Stop replaying TP if it is at the end of its execution history. */
2018
2019static void
2020record_btrace_stop_replaying_at_end (struct thread_info *tp)
2021{
2022 struct btrace_insn_iterator *replay, end;
2023 struct btrace_thread_info *btinfo;
2024
2025 btinfo = &tp->btrace;
2026 replay = btinfo->replay;
2027
2028 if (replay == NULL)
2029 return;
2030
2031 btrace_insn_end (&end, btinfo);
2032
2033 if (btrace_insn_cmp (replay, &end) == 0)
2034 record_btrace_stop_replaying (tp);
2035}
2036
b2f4cfde
MM
2037/* The to_resume method of target record-btrace. */
2038
2039static void
2040record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
2041 enum gdb_signal signal)
2042{
0ca912df 2043 struct thread_info *tp;
d2939ba2 2044 enum btrace_thread_flag flag, cflag;
52834460 2045
987e68b1
MM
2046 DEBUG ("resume %s: %s%s", target_pid_to_str (ptid),
2047 execution_direction == EXEC_REVERSE ? "reverse-" : "",
2048 step ? "step" : "cont");
52834460 2049
0ca912df
MM
2050 /* Store the execution direction of the last resume.
2051
2052 If there is more than one to_resume call, we have to rely on infrun
2053 to not change the execution direction in-between. */
70ad5bff
MM
2054 record_btrace_resume_exec_dir = execution_direction;
2055
0ca912df 2056 /* As long as we're not replaying, just forward the request.
52834460 2057
0ca912df
MM
2058 For non-stop targets this means that no thread is replaying. In order to
2059 make progress, we may need to explicitly move replaying threads to the end
2060 of their execution history. */
a52eab48
MM
2061 if ((execution_direction != EXEC_REVERSE)
2062 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2063 {
e75fdfca 2064 ops = ops->beneath;
04c4fe8c
MM
2065 ops->to_resume (ops, ptid, step, signal);
2066 return;
b2f4cfde
MM
2067 }
2068
52834460 2069 /* Compute the btrace thread flag for the requested move. */
d2939ba2
MM
2070 if (execution_direction == EXEC_REVERSE)
2071 {
2072 flag = step == 0 ? BTHR_RCONT : BTHR_RSTEP;
2073 cflag = BTHR_RCONT;
2074 }
52834460 2075 else
d2939ba2
MM
2076 {
2077 flag = step == 0 ? BTHR_CONT : BTHR_STEP;
2078 cflag = BTHR_CONT;
2079 }
52834460 2080
52834460 2081 /* We just indicate the resume intent here. The actual stepping happens in
d2939ba2
MM
2082 record_btrace_wait below.
2083
2084 For all-stop targets, we only step INFERIOR_PTID and continue others. */
2085 if (!target_is_non_stop_p ())
2086 {
2087 gdb_assert (ptid_match (inferior_ptid, ptid));
2088
2089 ALL_NON_EXITED_THREADS (tp)
2090 if (ptid_match (tp->ptid, ptid))
2091 {
2092 if (ptid_match (tp->ptid, inferior_ptid))
2093 record_btrace_resume_thread (tp, flag);
2094 else
2095 record_btrace_resume_thread (tp, cflag);
2096 }
2097 }
2098 else
2099 {
2100 ALL_NON_EXITED_THREADS (tp)
2101 if (ptid_match (tp->ptid, ptid))
2102 record_btrace_resume_thread (tp, flag);
2103 }
70ad5bff
MM
2104
2105 /* Async support. */
2106 if (target_can_async_p ())
2107 {
6a3753b3 2108 target_async (1);
70ad5bff
MM
2109 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2110 }
52834460
MM
2111}
2112
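/* A usage sketch for the resume path above (hedged; the reverse-execution
   commands referenced here are part of GDB's generic CLI, not this file):

     (gdb) record btrace
     (gdb) stepi
     (gdb) reverse-stepi
     (gdb) reverse-continue

   The reverse commands make infrun run with execution_direction set to
   EXEC_REVERSE, which selects the BTHR_RSTEP/BTHR_RCONT flags computed
   above; forward commands select BTHR_STEP/BTHR_CONT.  */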
85ad3aaf
PA
2113/* The to_commit_resume method of target record-btrace. */
2114
2115static void
2116record_btrace_commit_resume (struct target_ops *ops)
2117{
2118 if ((execution_direction != EXEC_REVERSE)
2119 && !record_btrace_is_replaying (ops, minus_one_ptid))
2120 ops->beneath->to_commit_resume (ops->beneath);
2121}
2122
987e68b1
MM
2123/* Cancel resuming TP. */
2124
2125static void
2126record_btrace_cancel_resume (struct thread_info *tp)
2127{
2128 enum btrace_thread_flag flags;
2129
2130 flags = tp->btrace.flags & (BTHR_MOVE | BTHR_STOP);
2131 if (flags == 0)
2132 return;
2133
43792cf0
PA
2134 DEBUG ("cancel resume thread %s (%s): %x (%s)",
2135 print_thread_id (tp),
987e68b1
MM
2136 target_pid_to_str (tp->ptid), flags,
2137 btrace_thread_flag_to_str (flags));
2138
2139 tp->btrace.flags &= ~(BTHR_MOVE | BTHR_STOP);
e3cfc1c7 2140 record_btrace_stop_replaying_at_end (tp);
52834460
MM
2141}
2142
2143/* Return a target_waitstatus indicating that we ran out of history. */
2144
2145static struct target_waitstatus
2146btrace_step_no_history (void)
2147{
2148 struct target_waitstatus status;
2149
2150 status.kind = TARGET_WAITKIND_NO_HISTORY;
2151
2152 return status;
2153}
2154
2155/* Return a target_waitstatus indicating that a step finished. */
2156
2157static struct target_waitstatus
2158btrace_step_stopped (void)
2159{
2160 struct target_waitstatus status;
2161
2162 status.kind = TARGET_WAITKIND_STOPPED;
2163 status.value.sig = GDB_SIGNAL_TRAP;
2164
2165 return status;
2166}
2167
6e4879f0
MM
2168/* Return a target_waitstatus indicating that a thread was stopped as
2169 requested. */
2170
2171static struct target_waitstatus
2172btrace_step_stopped_on_request (void)
2173{
2174 struct target_waitstatus status;
2175
2176 status.kind = TARGET_WAITKIND_STOPPED;
2177 status.value.sig = GDB_SIGNAL_0;
2178
2179 return status;
2180}
2181
d825d248
MM
2182/* Return a target_waitstatus indicating a spurious stop. */
2183
2184static struct target_waitstatus
2185btrace_step_spurious (void)
2186{
2187 struct target_waitstatus status;
2188
2189 status.kind = TARGET_WAITKIND_SPURIOUS;
2190
2191 return status;
2192}
2193
e3cfc1c7
MM
2194/* Return a target_waitstatus indicating that the thread was not resumed. */
2195
2196static struct target_waitstatus
2197btrace_step_no_resumed (void)
2198{
2199 struct target_waitstatus status;
2200
2201 status.kind = TARGET_WAITKIND_NO_RESUMED;
2202
2203 return status;
2204}
2205
2206/* Return a target_waitstatus indicating that we should wait again. */
2207
2208static struct target_waitstatus
2209btrace_step_again (void)
2210{
2211 struct target_waitstatus status;
2212
2213 status.kind = TARGET_WAITKIND_IGNORE;
2214
2215 return status;
2216}
2217
52834460
MM
2218/* Clear the record histories. */
2219
2220static void
2221record_btrace_clear_histories (struct btrace_thread_info *btinfo)
2222{
2223 xfree (btinfo->insn_history);
2224 xfree (btinfo->call_history);
2225
2226 btinfo->insn_history = NULL;
2227 btinfo->call_history = NULL;
2228}
2229
3c615f99
MM
2230/* Check whether TP's current replay position is at a breakpoint. */
2231
2232static int
2233record_btrace_replay_at_breakpoint (struct thread_info *tp)
2234{
2235 struct btrace_insn_iterator *replay;
2236 struct btrace_thread_info *btinfo;
2237 const struct btrace_insn *insn;
2238 struct inferior *inf;
2239
2240 btinfo = &tp->btrace;
2241 replay = btinfo->replay;
2242
2243 if (replay == NULL)
2244 return 0;
2245
2246 insn = btrace_insn_get (replay);
2247 if (insn == NULL)
2248 return 0;
2249
2250 inf = find_inferior_ptid (tp->ptid);
2251 if (inf == NULL)
2252 return 0;
2253
2254 return record_check_stopped_by_breakpoint (inf->aspace, insn->pc,
2255 &btinfo->stop_reason);
2256}
2257
d825d248 2258/* Step one instruction in forward direction. */
52834460
MM
2259
2260static struct target_waitstatus
d825d248 2261record_btrace_single_step_forward (struct thread_info *tp)
52834460 2262{
b61ce85c 2263 struct btrace_insn_iterator *replay, end, start;
52834460 2264 struct btrace_thread_info *btinfo;
52834460 2265
d825d248
MM
2266 btinfo = &tp->btrace;
2267 replay = btinfo->replay;
2268
2269 /* We're done if we're not replaying. */
2270 if (replay == NULL)
2271 return btrace_step_no_history ();
2272
011c71b6
MM
2273 /* Check if we're stepping a breakpoint. */
2274 if (record_btrace_replay_at_breakpoint (tp))
2275 return btrace_step_stopped ();
2276
b61ce85c
MM
2277 /* Skip gaps during replay. If we end up at a gap (at the end of the trace),
2278 jump back to the instruction at which we started. */
2279 start = *replay;
d825d248
MM
2280 do
2281 {
2282 unsigned int steps;
2283
e3cfc1c7
MM
2284 /* We will bail out here if we continue stepping after reaching the end
2285 of the execution history. */
d825d248
MM
2286 steps = btrace_insn_next (replay, 1);
2287 if (steps == 0)
b61ce85c
MM
2288 {
2289 *replay = start;
2290 return btrace_step_no_history ();
2291 }
d825d248
MM
2292 }
2293 while (btrace_insn_get (replay) == NULL);
2294
2295 /* Determine the end of the instruction trace. */
2296 btrace_insn_end (&end, btinfo);
2297
e3cfc1c7
MM
2298 /* The execution trace contains (and ends with) the current instruction.
2299 This instruction has not been executed, yet, so the trace really ends
2300 one instruction earlier. */
d825d248 2301 if (btrace_insn_cmp (replay, &end) == 0)
e3cfc1c7 2302 return btrace_step_no_history ();
d825d248
MM
2303
2304 return btrace_step_spurious ();
2305}
2306
2307/* Step one instruction in backward direction. */
2308
2309static struct target_waitstatus
2310record_btrace_single_step_backward (struct thread_info *tp)
2311{
b61ce85c 2312 struct btrace_insn_iterator *replay, start;
d825d248 2313 struct btrace_thread_info *btinfo;
e59fa00f 2314
52834460
MM
2315 btinfo = &tp->btrace;
2316 replay = btinfo->replay;
2317
d825d248
MM
2318 /* Start replaying if we're not already doing so. */
2319 if (replay == NULL)
2320 replay = record_btrace_start_replaying (tp);
2321
2322 /* If we can't step any further, we reached the end of the history.
b61ce85c
MM
2323 Skip gaps during replay. If we end up at a gap (at the beginning of
2324 the trace), jump back to the instruction at which we started. */
2325 start = *replay;
d825d248
MM
2326 do
2327 {
2328 unsigned int steps;
2329
2330 steps = btrace_insn_prev (replay, 1);
2331 if (steps == 0)
b61ce85c
MM
2332 {
2333 *replay = start;
2334 return btrace_step_no_history ();
2335 }
d825d248
MM
2336 }
2337 while (btrace_insn_get (replay) == NULL);
2338
011c71b6
MM
2339 /* Check if we're stepping a breakpoint.
2340
2341 For reverse-stepping, this check is after the step. There is logic in
2342 infrun.c that handles reverse-stepping separately. See, for example,
2343 proceed and adjust_pc_after_break.
2344
2345 This code assumes that for reverse-stepping, PC points to the last
2346 de-executed instruction, whereas for forward-stepping PC points to the
2347 next to-be-executed instruction. */
2348 if (record_btrace_replay_at_breakpoint (tp))
2349 return btrace_step_stopped ();
2350
d825d248
MM
2351 return btrace_step_spurious ();
2352}
2353
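/* Illustration of the PC convention described above, using hypothetical
   addresses: with recorded instructions at 0x1000 and 0x1004, forward-
   stepping the instruction at 0x1000 leaves PC at 0x1004, the next
   to-be-executed instruction, so the breakpoint check precedes the step.
   Reverse-stepping back across 0x1004 also leaves PC at 0x1004, now the
   last de-executed instruction, so the check has to follow the step, as
   it does here.  */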
2354/* Step a single thread. */
2355
2356static struct target_waitstatus
2357record_btrace_step_thread (struct thread_info *tp)
2358{
2359 struct btrace_thread_info *btinfo;
2360 struct target_waitstatus status;
2361 enum btrace_thread_flag flags;
2362
2363 btinfo = &tp->btrace;
2364
6e4879f0
MM
2365 flags = btinfo->flags & (BTHR_MOVE | BTHR_STOP);
2366 btinfo->flags &= ~(BTHR_MOVE | BTHR_STOP);
52834460 2367
43792cf0 2368 DEBUG ("stepping thread %s (%s): %x (%s)", print_thread_id (tp),
987e68b1
MM
2369 target_pid_to_str (tp->ptid), flags,
2370 btrace_thread_flag_to_str (flags));
52834460 2371
6e4879f0
MM
2372 /* We can't step without an execution history. */
2373 if ((flags & BTHR_MOVE) != 0 && btrace_is_empty (tp))
2374 return btrace_step_no_history ();
2375
52834460
MM
2376 switch (flags)
2377 {
2378 default:
2379 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
2380
6e4879f0
MM
2381 case BTHR_STOP:
2382 return btrace_step_stopped_on_request ();
2383
52834460 2384 case BTHR_STEP:
d825d248
MM
2385 status = record_btrace_single_step_forward (tp);
2386 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2387 break;
52834460
MM
2388
2389 return btrace_step_stopped ();
2390
2391 case BTHR_RSTEP:
d825d248
MM
2392 status = record_btrace_single_step_backward (tp);
2393 if (status.kind != TARGET_WAITKIND_SPURIOUS)
e3cfc1c7 2394 break;
52834460
MM
2395
2396 return btrace_step_stopped ();
2397
2398 case BTHR_CONT:
e3cfc1c7
MM
2399 status = record_btrace_single_step_forward (tp);
2400 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2401 break;
52834460 2402
e3cfc1c7
MM
2403 btinfo->flags |= flags;
2404 return btrace_step_again ();
52834460
MM
2405
2406 case BTHR_RCONT:
e3cfc1c7
MM
2407 status = record_btrace_single_step_backward (tp);
2408 if (status.kind != TARGET_WAITKIND_SPURIOUS)
2409 break;
52834460 2410
e3cfc1c7
MM
2411 btinfo->flags |= flags;
2412 return btrace_step_again ();
2413 }
d825d248 2414
e3cfc1c7
MM
2415 /* We keep threads moving at the end of their execution history. The to_wait
2416 method will stop the thread for whom the event is reported. */
2417 if (status.kind == TARGET_WAITKIND_NO_HISTORY)
2418 btinfo->flags |= flags;
52834460 2419
e3cfc1c7 2420 return status;
b2f4cfde
MM
2421}
2422
e3cfc1c7
MM
2423/* A vector of threads. */
2424
2425typedef struct thread_info * tp_t;
2426DEF_VEC_P (tp_t);
2427
a6b5be76
MM
2428/* Announce further events if necessary. */
2429
2430static void
53127008
SM
2431record_btrace_maybe_mark_async_event
2432 (const std::vector<thread_info *> &moving,
2433 const std::vector<thread_info *> &no_history)
a6b5be76 2434{
53127008
SM
2435 bool more_moving = !moving.empty ();
 2436 bool more_no_history = !no_history.empty ();
a6b5be76
MM
2437
2438 if (!more_moving && !more_no_history)
2439 return;
2440
2441 if (more_moving)
2442 DEBUG ("movers pending");
2443
2444 if (more_no_history)
2445 DEBUG ("no-history pending");
2446
2447 mark_async_event_handler (record_btrace_async_inferior_event_handler);
2448}
2449
b2f4cfde
MM
2450/* The to_wait method of target record-btrace. */
2451
2452static ptid_t
2453record_btrace_wait (struct target_ops *ops, ptid_t ptid,
2454 struct target_waitstatus *status, int options)
2455{
53127008
SM
2456 std::vector<thread_info *> moving;
2457 std::vector<thread_info *> no_history;
52834460
MM
2458
2459 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
2460
b2f4cfde 2461 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2462 if ((execution_direction != EXEC_REVERSE)
2463 && !record_btrace_is_replaying (ops, minus_one_ptid))
b2f4cfde 2464 {
e75fdfca
TT
2465 ops = ops->beneath;
2466 return ops->to_wait (ops, ptid, status, options);
b2f4cfde
MM
2467 }
2468
e3cfc1c7 2469 /* Keep a work list of moving threads. */
53127008
SM
2470 {
2471 thread_info *tp;
2472
2473 ALL_NON_EXITED_THREADS (tp)
2474 {
2475 if (ptid_match (tp->ptid, ptid)
2476 && ((tp->btrace.flags & (BTHR_MOVE | BTHR_STOP)) != 0))
2477 moving.push_back (tp);
2478 }
2479 }
e3cfc1c7 2480
53127008 2481 if (moving.empty ())
52834460 2482 {
e3cfc1c7 2483 *status = btrace_step_no_resumed ();
52834460 2484
e3cfc1c7 2485 DEBUG ("wait ended by %s: %s", target_pid_to_str (null_ptid),
23fdd69e 2486 target_waitstatus_to_string (status).c_str ());
e3cfc1c7 2487
e3cfc1c7 2488 return null_ptid;
52834460
MM
2489 }
2490
e3cfc1c7
MM
2491 /* Step moving threads one by one, one step each, until either one thread
2492 reports an event or we run out of threads to step.
2493
2494 When stepping more than one thread, chances are that some threads reach
2495 the end of their execution history earlier than others. If we reported
2496 this immediately, all-stop on top of non-stop would stop all threads and
2497 resume the same threads next time. And we would report the same thread
2498 having reached the end of its execution history again.
2499
2500 In the worst case, this would starve the other threads. But even if other
2501 threads would be allowed to make progress, this would result in far too
2502 many intermediate stops.
2503
2504 We therefore delay the reporting of "no execution history" until we have
2505 nothing else to report. By this time, all threads should have moved to
2506 either the beginning or the end of their execution history. There will
2507 be a single user-visible stop. */
53127008
SM
2508 struct thread_info *eventing = NULL;
2509 while ((eventing == NULL) && !moving.empty ())
e3cfc1c7 2510 {
53127008 2511 for (unsigned int ix = 0; eventing == NULL && ix < moving.size ();)
e3cfc1c7 2512 {
53127008
SM
2513 thread_info *tp = moving[ix];
2514
e3cfc1c7
MM
2515 *status = record_btrace_step_thread (tp);
2516
2517 switch (status->kind)
2518 {
2519 case TARGET_WAITKIND_IGNORE:
2520 ix++;
2521 break;
2522
2523 case TARGET_WAITKIND_NO_HISTORY:
53127008 2524 no_history.push_back (ordered_remove (moving, ix));
e3cfc1c7
MM
2525 break;
2526
2527 default:
53127008 2528 eventing = unordered_remove (moving, ix);
e3cfc1c7
MM
2529 break;
2530 }
2531 }
2532 }
2533
2534 if (eventing == NULL)
2535 {
2536 /* We started with at least one moving thread. This thread must have
2537 either stopped or reached the end of its execution history.
2538
2539 In the former case, EVENTING must not be NULL.
2540 In the latter case, NO_HISTORY must not be empty. */
53127008 2541 gdb_assert (!no_history.empty ());
e3cfc1c7
MM
2542
2543 /* We kept threads moving at the end of their execution history. Stop
2544 EVENTING now that we are going to report its stop. */
53127008 2545 eventing = unordered_remove (no_history, 0);
e3cfc1c7
MM
2546 eventing->btrace.flags &= ~BTHR_MOVE;
2547
2548 *status = btrace_step_no_history ();
2549 }
2550
2551 gdb_assert (eventing != NULL);
2552
2553 /* We kept threads replaying at the end of their execution history. Stop
2554 replaying EVENTING now that we are going to report its stop. */
2555 record_btrace_stop_replaying_at_end (eventing);
52834460
MM
2556
2557 /* Stop all other threads. */
5953356c 2558 if (!target_is_non_stop_p ())
53127008
SM
2559 {
2560 thread_info *tp;
2561
2562 ALL_NON_EXITED_THREADS (tp)
2563 record_btrace_cancel_resume (tp);
2564 }
52834460 2565
a6b5be76
MM
2566 /* In async mode, we need to announce further events. */
2567 if (target_is_async_p ())
2568 record_btrace_maybe_mark_async_event (moving, no_history);
2569
52834460 2570 /* Start record histories anew from the current position. */
e3cfc1c7 2571 record_btrace_clear_histories (&eventing->btrace);
52834460
MM
2572
2573 /* We moved the replay position but did not update registers. */
e3cfc1c7
MM
2574 registers_changed_ptid (eventing->ptid);
2575
43792cf0
PA
2576 DEBUG ("wait ended by thread %s (%s): %s",
2577 print_thread_id (eventing),
e3cfc1c7 2578 target_pid_to_str (eventing->ptid),
23fdd69e 2579 target_waitstatus_to_string (status).c_str ());
52834460 2580
e3cfc1c7 2581 return eventing->ptid;
52834460
MM
2582}
2583
6e4879f0
MM
2584/* The to_stop method of target record-btrace. */
2585
2586static void
2587record_btrace_stop (struct target_ops *ops, ptid_t ptid)
2588{
2589 DEBUG ("stop %s", target_pid_to_str (ptid));
2590
2591 /* As long as we're not replaying, just forward the request. */
a52eab48
MM
2592 if ((execution_direction != EXEC_REVERSE)
2593 && !record_btrace_is_replaying (ops, minus_one_ptid))
6e4879f0
MM
2594 {
2595 ops = ops->beneath;
2596 ops->to_stop (ops, ptid);
2597 }
2598 else
2599 {
2600 struct thread_info *tp;
2601
2602 ALL_NON_EXITED_THREADS (tp)
2603 if (ptid_match (tp->ptid, ptid))
2604 {
2605 tp->btrace.flags &= ~BTHR_MOVE;
2606 tp->btrace.flags |= BTHR_STOP;
2607 }
2608 }
2609 }
2610
52834460
MM
2611/* The to_can_execute_reverse method of target record-btrace. */
2612
2613static int
19db3e69 2614record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
2615{
2616 return 1;
2617}
2618
9e8915c6 2619/* The to_stopped_by_sw_breakpoint method of target record-btrace. */
52834460 2620
9e8915c6
PA
2621static int
2622record_btrace_stopped_by_sw_breakpoint (struct target_ops *ops)
52834460 2623{
a52eab48 2624 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2625 {
2626 struct thread_info *tp = inferior_thread ();
2627
2628 return tp->btrace.stop_reason == TARGET_STOPPED_BY_SW_BREAKPOINT;
2629 }
2630
2631 return ops->beneath->to_stopped_by_sw_breakpoint (ops->beneath);
2632}
2633
2634/* The to_supports_stopped_by_sw_breakpoint method of target
2635 record-btrace. */
2636
2637static int
2638record_btrace_supports_stopped_by_sw_breakpoint (struct target_ops *ops)
2639{
a52eab48 2640 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2641 return 1;
2642
2643 return ops->beneath->to_supports_stopped_by_sw_breakpoint (ops->beneath);
2644}
2645
 2646/* The to_stopped_by_hw_breakpoint method of target record-btrace. */
2647
2648static int
2649record_btrace_stopped_by_hw_breakpoint (struct target_ops *ops)
2650{
a52eab48 2651 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6
PA
2652 {
2653 struct thread_info *tp = inferior_thread ();
2654
2655 return tp->btrace.stop_reason == TARGET_STOPPED_BY_HW_BREAKPOINT;
2656 }
2657
2658 return ops->beneath->to_stopped_by_hw_breakpoint (ops->beneath);
2659}
2660
2661/* The to_supports_stopped_by_hw_breakpoint method of target
2662 record-btrace. */
2663
2664static int
2665record_btrace_supports_stopped_by_hw_breakpoint (struct target_ops *ops)
2666{
a52eab48 2667 if (record_btrace_is_replaying (ops, minus_one_ptid))
9e8915c6 2668 return 1;
52834460 2669
9e8915c6 2670 return ops->beneath->to_supports_stopped_by_hw_breakpoint (ops->beneath);
b2f4cfde
MM
2671}
2672
e8032dde 2673/* The to_update_thread_list method of target record-btrace. */
e2887aa3
MM
2674
2675static void
e8032dde 2676record_btrace_update_thread_list (struct target_ops *ops)
e2887aa3 2677{
e8032dde 2678 /* We don't add or remove threads during replay. */
a52eab48 2679 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2680 return;
2681
2682 /* Forward the request. */
e75fdfca 2683 ops = ops->beneath;
e8032dde 2684 ops->to_update_thread_list (ops);
e2887aa3
MM
2685}
2686
2687/* The to_thread_alive method of target record-btrace. */
2688
2689static int
2690record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
2691{
2692 /* We don't add or remove threads during replay. */
a52eab48 2693 if (record_btrace_is_replaying (ops, minus_one_ptid))
e2887aa3
MM
2694 return find_thread_ptid (ptid) != NULL;
2695
2696 /* Forward the request. */
e75fdfca
TT
2697 ops = ops->beneath;
2698 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
2699}
2700
066ce621
MM
2701/* Set the replay branch trace instruction iterator. If IT is NULL, replay
2702 is stopped. */
2703
2704static void
2705record_btrace_set_replay (struct thread_info *tp,
2706 const struct btrace_insn_iterator *it)
2707{
2708 struct btrace_thread_info *btinfo;
2709
2710 btinfo = &tp->btrace;
2711
a0f1b963 2712 if (it == NULL)
52834460 2713 record_btrace_stop_replaying (tp);
066ce621
MM
2714 else
2715 {
2716 if (btinfo->replay == NULL)
52834460 2717 record_btrace_start_replaying (tp);
066ce621
MM
2718 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
2719 return;
2720
2721 *btinfo->replay = *it;
52834460 2722 registers_changed_ptid (tp->ptid);
066ce621
MM
2723 }
2724
52834460
MM
2725 /* Start anew from the new replay position. */
2726 record_btrace_clear_histories (btinfo);
485668e5
MM
2727
2728 stop_pc = regcache_read_pc (get_current_regcache ());
2729 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
066ce621
MM
2730}
2731
2732/* The to_goto_record_begin method of target record-btrace. */
2733
2734static void
08475817 2735record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
2736{
2737 struct thread_info *tp;
2738 struct btrace_insn_iterator begin;
2739
2740 tp = require_btrace_thread ();
2741
2742 btrace_insn_begin (&begin, &tp->btrace);
b61ce85c
MM
2743
2744 /* Skip gaps at the beginning of the trace. */
2745 while (btrace_insn_get (&begin) == NULL)
2746 {
2747 unsigned int steps;
2748
2749 steps = btrace_insn_next (&begin, 1);
2750 if (steps == 0)
2751 error (_("No trace."));
2752 }
2753
066ce621 2754 record_btrace_set_replay (tp, &begin);
066ce621
MM
2755}
2756
2757/* The to_goto_record_end method of target record-btrace. */
2758
2759static void
307a1b91 2760record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
2761{
2762 struct thread_info *tp;
2763
2764 tp = require_btrace_thread ();
2765
2766 record_btrace_set_replay (tp, NULL);
066ce621
MM
2767}
2768
2769/* The to_goto_record method of target record-btrace. */
2770
2771static void
606183ac 2772record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
2773{
2774 struct thread_info *tp;
2775 struct btrace_insn_iterator it;
2776 unsigned int number;
2777 int found;
2778
2779 number = insn;
2780
2781 /* Check for wrap-arounds. */
2782 if (number != insn)
2783 error (_("Instruction number out of range."));
2784
2785 tp = require_btrace_thread ();
2786
2787 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
69090cee
TW
2788
2789 /* Check if the instruction could not be found or is a gap. */
2790 if (found == 0 || btrace_insn_get (&it) == NULL)
066ce621
MM
2791 error (_("No such instruction."));
2792
2793 record_btrace_set_replay (tp, &it);
066ce621
MM
2794}
2795
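/* Usage sketch for the three goto methods above; the "record goto"
   commands that dispatch to them live in GDB's generic record layer,
   not in this file:

     (gdb) record goto begin    # replay from the first traced instruction
     (gdb) record goto 42       # jump to instruction number 42
     (gdb) record goto end      # move to the end of the trace and stop
                                  replaying  */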
797094dd
MM
2796/* The to_record_stop_replaying method of target record-btrace. */
2797
2798static void
2799record_btrace_stop_replaying_all (struct target_ops *self)
2800{
2801 struct thread_info *tp;
2802
2803 ALL_NON_EXITED_THREADS (tp)
2804 record_btrace_stop_replaying (tp);
2805}
2806
70ad5bff
MM
2807/* The to_execution_direction target method. */
2808
2809static enum exec_direction_kind
2810record_btrace_execution_direction (struct target_ops *self)
2811{
2812 return record_btrace_resume_exec_dir;
2813}
2814
aef92902
MM
2815/* The to_prepare_to_generate_core target method. */
2816
2817static void
2818record_btrace_prepare_to_generate_core (struct target_ops *self)
2819{
2820 record_btrace_generating_corefile = 1;
2821}
2822
2823/* The to_done_generating_core target method. */
2824
2825static void
2826record_btrace_done_generating_core (struct target_ops *self)
2827{
2828 record_btrace_generating_corefile = 0;
2829}
2830
afedecd3
MM
2831/* Initialize the record-btrace target ops. */
2832
2833static void
2834init_record_btrace_ops (void)
2835{
2836 struct target_ops *ops;
2837
2838 ops = &record_btrace_ops;
2839 ops->to_shortname = "record-btrace";
2840 ops->to_longname = "Branch tracing target";
2841 ops->to_doc = "Collect control-flow trace and provide the execution history.";
2842 ops->to_open = record_btrace_open;
2843 ops->to_close = record_btrace_close;
b7d2e916 2844 ops->to_async = record_btrace_async;
afedecd3 2845 ops->to_detach = record_detach;
c0272db5 2846 ops->to_disconnect = record_btrace_disconnect;
afedecd3
MM
2847 ops->to_mourn_inferior = record_mourn_inferior;
2848 ops->to_kill = record_kill;
afedecd3
MM
2849 ops->to_stop_recording = record_btrace_stop_recording;
2850 ops->to_info_record = record_btrace_info;
2851 ops->to_insn_history = record_btrace_insn_history;
2852 ops->to_insn_history_from = record_btrace_insn_history_from;
2853 ops->to_insn_history_range = record_btrace_insn_history_range;
2854 ops->to_call_history = record_btrace_call_history;
2855 ops->to_call_history_from = record_btrace_call_history_from;
2856 ops->to_call_history_range = record_btrace_call_history_range;
b158a20f 2857 ops->to_record_method = record_btrace_record_method;
07bbe694 2858 ops->to_record_is_replaying = record_btrace_is_replaying;
7ff27e9b 2859 ops->to_record_will_replay = record_btrace_will_replay;
797094dd 2860 ops->to_record_stop_replaying = record_btrace_stop_replaying_all;
633785ff
MM
2861 ops->to_xfer_partial = record_btrace_xfer_partial;
2862 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
2863 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
2864 ops->to_fetch_registers = record_btrace_fetch_registers;
2865 ops->to_store_registers = record_btrace_store_registers;
2866 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
2867 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
2868 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde 2869 ops->to_resume = record_btrace_resume;
85ad3aaf 2870 ops->to_commit_resume = record_btrace_commit_resume;
b2f4cfde 2871 ops->to_wait = record_btrace_wait;
6e4879f0 2872 ops->to_stop = record_btrace_stop;
e8032dde 2873 ops->to_update_thread_list = record_btrace_update_thread_list;
e2887aa3 2874 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
2875 ops->to_goto_record_begin = record_btrace_goto_begin;
2876 ops->to_goto_record_end = record_btrace_goto_end;
2877 ops->to_goto_record = record_btrace_goto;
52834460 2878 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
9e8915c6
PA
2879 ops->to_stopped_by_sw_breakpoint = record_btrace_stopped_by_sw_breakpoint;
2880 ops->to_supports_stopped_by_sw_breakpoint
2881 = record_btrace_supports_stopped_by_sw_breakpoint;
2882 ops->to_stopped_by_hw_breakpoint = record_btrace_stopped_by_hw_breakpoint;
2883 ops->to_supports_stopped_by_hw_breakpoint
2884 = record_btrace_supports_stopped_by_hw_breakpoint;
70ad5bff 2885 ops->to_execution_direction = record_btrace_execution_direction;
aef92902
MM
2886 ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
2887 ops->to_done_generating_core = record_btrace_done_generating_core;
afedecd3
MM
2888 ops->to_stratum = record_stratum;
2889 ops->to_magic = OPS_MAGIC;
2890}
2891
f4abbc16
MM
2892/* Start recording in BTS format. */
2893
2894static void
cdb34d4a 2895cmd_record_btrace_bts_start (const char *args, int from_tty)
f4abbc16 2896{
f4abbc16
MM
2897 if (args != NULL && *args != 0)
2898 error (_("Invalid argument."));
2899
2900 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2901
492d29ea
PA
2902 TRY
2903 {
95a6b0a1 2904 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2905 }
2906 CATCH (exception, RETURN_MASK_ALL)
f4abbc16
MM
2907 {
2908 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2909 throw_exception (exception);
2910 }
492d29ea 2911 END_CATCH
f4abbc16
MM
2912}
2913
bc504a31 2914/* Start recording in Intel Processor Trace format. */
afedecd3
MM
2915
2916static void
cdb34d4a 2917cmd_record_btrace_pt_start (const char *args, int from_tty)
afedecd3
MM
2918{
2919 if (args != NULL && *args != 0)
2920 error (_("Invalid argument."));
2921
b20a6524 2922 record_btrace_conf.format = BTRACE_FORMAT_PT;
f4abbc16 2923
492d29ea
PA
2924 TRY
2925 {
95a6b0a1 2926 execute_command ("target record-btrace", from_tty);
492d29ea
PA
2927 }
2928 CATCH (exception, RETURN_MASK_ALL)
2929 {
2930 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2931 throw_exception (exception);
2932 }
2933 END_CATCH
afedecd3
MM
2934}
2935
b20a6524
MM
2936/* Alias for "target record". */
2937
2938static void
981a3fb3 2939cmd_record_btrace_start (const char *args, int from_tty)
b20a6524
MM
2940{
2941 if (args != NULL && *args != 0)
2942 error (_("Invalid argument."));
2943
2944 record_btrace_conf.format = BTRACE_FORMAT_PT;
2945
2946 TRY
2947 {
95a6b0a1 2948 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2949 }
2950 CATCH (exception, RETURN_MASK_ALL)
2951 {
2952 record_btrace_conf.format = BTRACE_FORMAT_BTS;
2953
2954 TRY
2955 {
95a6b0a1 2956 execute_command ("target record-btrace", from_tty);
b20a6524
MM
2957 }
2958 CATCH (exception, RETURN_MASK_ALL)
2959 {
2960 record_btrace_conf.format = BTRACE_FORMAT_NONE;
2961 throw_exception (exception);
2962 }
2963 END_CATCH
2964 }
2965 END_CATCH
2966}
2967
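/* Usage sketch for the three start commands above:

     (gdb) record btrace bts    # force the BTS format
     (gdb) record btrace pt     # force the Intel Processor Trace format
     (gdb) record btrace        # try PT first, fall back to BTS

   Each command sets record_btrace_conf.format and then runs
   "target record-btrace"; on failure the format is reset and the
   exception re-thrown (plain "record btrace" retries with BTS before
   giving up).  */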
67b5c0c1
MM
2968/* The "set record btrace" command. */
2969
2970static void
981a3fb3 2971cmd_set_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2972{
2973 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
2974}
2975
2976/* The "show record btrace" command. */
2977
2978static void
981a3fb3 2979cmd_show_record_btrace (const char *args, int from_tty)
67b5c0c1
MM
2980{
2981 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2982}
2983
2984/* The "show record btrace replay-memory-access" command. */
2985
2986static void
2987cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2988 struct cmd_list_element *c, const char *value)
2989{
2990 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2991 replay_memory_access);
2992}
2993
d33501a5
MM
2994/* The "set record btrace bts" command. */
2995
2996static void
981a3fb3 2997cmd_set_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
2998{
2999 printf_unfiltered (_("\"set record btrace bts\" must be followed "
b20a6524 3000 "by an appropriate subcommand.\n"));
d33501a5
MM
3001 help_list (set_record_btrace_bts_cmdlist, "set record btrace bts ",
3002 all_commands, gdb_stdout);
3003}
3004
3005/* The "show record btrace bts" command. */
3006
3007static void
981a3fb3 3008cmd_show_record_btrace_bts (const char *args, int from_tty)
d33501a5
MM
3009{
3010 cmd_show_list (show_record_btrace_bts_cmdlist, from_tty, "");
3011}
3012
b20a6524
MM
3013/* The "set record btrace pt" command. */
3014
3015static void
981a3fb3 3016cmd_set_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3017{
3018 printf_unfiltered (_("\"set record btrace pt\" must be followed "
3019 "by an appropriate subcommand.\n"));
3020 help_list (set_record_btrace_pt_cmdlist, "set record btrace pt ",
3021 all_commands, gdb_stdout);
3022}
3023
3024/* The "show record btrace pt" command. */
3025
3026static void
981a3fb3 3027cmd_show_record_btrace_pt (const char *args, int from_tty)
b20a6524
MM
3028{
3029 cmd_show_list (show_record_btrace_pt_cmdlist, from_tty, "");
3030}
3031
3032/* The "record bts buffer-size" show value function. */
3033
3034static void
3035show_record_bts_buffer_size_value (struct ui_file *file, int from_tty,
3036 struct cmd_list_element *c,
3037 const char *value)
3038{
3039 fprintf_filtered (file, _("The record/replay bts buffer size is %s.\n"),
3040 value);
3041}
3042
3043/* The "record pt buffer-size" show value function. */
3044
3045static void
3046show_record_pt_buffer_size_value (struct ui_file *file, int from_tty,
3047 struct cmd_list_element *c,
3048 const char *value)
3049{
3050 fprintf_filtered (file, _("The record/replay pt buffer size is %s.\n"),
3051 value);
3052}
3053
afedecd3
MM
3054/* Initialize btrace commands. */
3055
3056void
3057_initialize_record_btrace (void)
3058{
f4abbc16
MM
3059 add_prefix_cmd ("btrace", class_obscure, cmd_record_btrace_start,
3060 _("Start branch trace recording."), &record_btrace_cmdlist,
3061 "record btrace ", 0, &record_cmdlist);
afedecd3
MM
3062 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
3063
f4abbc16
MM
3064 add_cmd ("bts", class_obscure, cmd_record_btrace_bts_start,
3065 _("\
3066Start branch trace recording in Branch Trace Store (BTS) format.\n\n\
3067The processor stores a from/to record for each branch into a cyclic buffer.\n\
3068This format may not be available on all processors."),
3069 &record_btrace_cmdlist);
3070 add_alias_cmd ("bts", "btrace bts", class_obscure, 1, &record_cmdlist);
3071
b20a6524
MM
3072 add_cmd ("pt", class_obscure, cmd_record_btrace_pt_start,
3073 _("\
bc504a31 3074Start branch trace recording in Intel Processor Trace format.\n\n\
b20a6524
MM
3075This format may not be available on all processors."),
3076 &record_btrace_cmdlist);
3077 add_alias_cmd ("pt", "btrace pt", class_obscure, 1, &record_cmdlist);
3078
67b5c0c1
MM
3079 add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
3080 _("Set record options"), &set_record_btrace_cmdlist,
3081 "set record btrace ", 0, &set_record_cmdlist);
3082
3083 add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
3084 _("Show record options"), &show_record_btrace_cmdlist,
3085 "show record btrace ", 0, &show_record_cmdlist);
3086
3087 add_setshow_enum_cmd ("replay-memory-access", no_class,
3088 replay_memory_access_types, &replay_memory_access, _("\
3089Set what memory accesses are allowed during replay."), _("\
3090Show what memory accesses are allowed during replay."),
3091 _("Default is READ-ONLY.\n\n\
3092The btrace record target does not trace data.\n\
3093The memory therefore corresponds to the live target and not \
3094to the current replay position.\n\n\
3095When READ-ONLY, allow accesses to read-only memory during replay.\n\
3096When READ-WRITE, allow accesses to read-only and read-write memory during \
3097replay."),
3098 NULL, cmd_show_replay_memory_access,
3099 &set_record_btrace_cmdlist,
3100 &show_record_btrace_cmdlist);
3101
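  /* Example use of the option registered above (hedged sketch):

       (gdb) set record btrace replay-memory-access read-write
       (gdb) show record btrace replay-memory-access
       Replay memory access is read-write.

     read-write additionally allows writes to read-write memory while
     replaying, as described in the help text above.  */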
d33501a5
MM
3102 add_prefix_cmd ("bts", class_support, cmd_set_record_btrace_bts,
3103 _("Set record btrace bts options"),
3104 &set_record_btrace_bts_cmdlist,
3105 "set record btrace bts ", 0, &set_record_btrace_cmdlist);
3106
3107 add_prefix_cmd ("bts", class_support, cmd_show_record_btrace_bts,
3108 _("Show record btrace bts options"),
3109 &show_record_btrace_bts_cmdlist,
3110 "show record btrace bts ", 0, &show_record_btrace_cmdlist);
3111
3112 add_setshow_uinteger_cmd ("buffer-size", no_class,
3113 &record_btrace_conf.bts.size,
3114 _("Set the record/replay bts buffer size."),
3115 _("Show the record/replay bts buffer size."), _("\
3116When starting recording, request a trace buffer of this size. \
3117The actual buffer size may differ from the requested size. \
3118Use \"info record\" to see the actual buffer size.\n\n\
3119Bigger buffers allow longer recording but also take more time to process \
3120the recorded execution trace.\n\n\
b20a6524
MM
3121The trace buffer size may not be changed while recording."), NULL,
3122 show_record_bts_buffer_size_value,
d33501a5
MM
3123 &set_record_btrace_bts_cmdlist,
3124 &show_record_btrace_bts_cmdlist);
3125
b20a6524
MM
3126 add_prefix_cmd ("pt", class_support, cmd_set_record_btrace_pt,
3127 _("Set record btrace pt options"),
3128 &set_record_btrace_pt_cmdlist,
3129 "set record btrace pt ", 0, &set_record_btrace_cmdlist);
3130
3131 add_prefix_cmd ("pt", class_support, cmd_show_record_btrace_pt,
3132 _("Show record btrace pt options"),
3133 &show_record_btrace_pt_cmdlist,
3134 "show record btrace pt ", 0, &show_record_btrace_cmdlist);
3135
3136 add_setshow_uinteger_cmd ("buffer-size", no_class,
3137 &record_btrace_conf.pt.size,
3138 _("Set the record/replay pt buffer size."),
3139 _("Show the record/replay pt buffer size."), _("\
3140Bigger buffers allow longer recording but also take more time to process \
3141the recorded execution.\n\
3142The actual buffer size may differ from the requested size. Use \"info record\" \
3143to see the actual buffer size."), NULL, show_record_pt_buffer_size_value,
3144 &set_record_btrace_pt_cmdlist,
3145 &show_record_btrace_pt_cmdlist);
3146
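  /* Example use of the two buffer-size options above; sizes are given in
     bytes (cf. the 64 * 1024 and 16 * 1024 defaults set at the end of
     this function):

       (gdb) set record btrace bts buffer-size 131072
       (gdb) set record btrace pt buffer-size 32768
       (gdb) info record

     "info record" shows the buffer size actually obtained, which may
     differ from the requested size.  */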
afedecd3
MM
3147 init_record_btrace_ops ();
3148 add_target (&record_btrace_ops);
0b722aec
MM
3149
3150 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
3151 xcalloc, xfree);
d33501a5
MM
3152
3153 record_btrace_conf.bts.size = 64 * 1024;
b20a6524 3154 record_btrace_conf.pt.size = 16 * 1024;
afedecd3 3155}