]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
constify some cli-utils stuff
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "exceptions.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
45741a9c 38#include "infrun.h"
70ad5bff
MM
39#include "event-loop.h"
40#include "inf-loop.h"
afedecd3
MM
41
/* The target_ops of record-btrace.  */
static struct target_ops record_btrace_ops;

/* A new thread observer enabling branch tracing for the new thread.  */
static struct observer *record_btrace_thread_observer;

/* Memory access types used in set/show record btrace replay-memory-access.  */
static const char replay_memory_access_read_only[] = "read-only";
static const char replay_memory_access_read_write[] = "read-write";

/* The NULL-terminated list of valid values for the
   "set record btrace replay-memory-access" command.  */
static const char *const replay_memory_access_types[] =
{
  replay_memory_access_read_only,
  replay_memory_access_read_write,
  NULL
};

/* The currently allowed replay memory access type.  Compared by pointer
   identity against the constants above, not by string contents.  */
static const char *replay_memory_access = replay_memory_access_read_only;

/* Command lists for "set/show record btrace".  */
static struct cmd_list_element *set_record_btrace_cmdlist;
static struct cmd_list_element *show_record_btrace_cmdlist;

/* The execution direction of the last resume we got.  See record-full.c.  */
static enum exec_direction_kind record_btrace_resume_exec_dir = EXEC_FORWARD;

/* The async event handler for reverse/replay execution.  */
static struct async_event_handler *record_btrace_async_inferior_event_handler;

/* A flag indicating that we are currently generating a core file.  While
   set, replay-mode restrictions on memory/register access are lifted.  */
static int record_btrace_generating_corefile;
73
afedecd3
MM
/* Print a record-btrace debug message.  Use do ... while (0) to avoid
   ambiguities when used in if statements.  Only prints when the user has
   enabled "set debug record".  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
85
86
/* Update the branch trace for the current thread and return a pointer to its
   thread_info.

   Throws an error if there is no thread or no trace.  This function never
   returns NULL.  */

static struct thread_info *
require_btrace_thread (void)
{
  struct thread_info *tp;

  DEBUG ("require");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  /* Pull in any new trace before checking for emptiness.  */
  btrace_fetch (tp);

  if (btrace_is_empty (tp))
    error (_("No trace."));

  return tp;
}
111
112/* Update the branch trace for the current thread and return a pointer to its
113 branch trace information struct.
114
115 Throws an error if there is no thread or no trace. This function never
116 returns NULL. */
117
118static struct btrace_thread_info *
119require_btrace (void)
120{
121 struct thread_info *tp;
122
123 tp = require_btrace_thread ();
124
125 return &tp->btrace;
afedecd3
MM
126}
127
/* Enable branch tracing for one thread.  Warn on errors.  */

static void
record_btrace_enable_warn (struct thread_info *tp)
{
  volatile struct gdb_exception error;

  /* Downgrade errors to warnings; a failure to trace one new thread should
     not abort the caller (the new-thread observer).  */
  TRY_CATCH (error, RETURN_MASK_ERROR)
    btrace_enable (tp);

  if (error.message != NULL)
    warning ("%s", error.message);
}
141
/* Callback function to disable branch tracing for one thread.  Used as a
   cleanup; ARG is the thread_info to disable.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *thread = arg;

  btrace_disable (thread);
}
153
/* Enable automatic tracing of new threads.  */

static void
record_btrace_auto_enable (void)
{
  DEBUG ("attach thread observer");

  /* Remember the observer so it can be detached again later.  */
  record_btrace_thread_observer
    = observer_attach_new_thread (record_btrace_enable_warn);
}
164
165/* Disable automatic tracing of new threads. */
166
167static void
168record_btrace_auto_disable (void)
169{
170 /* The observer may have been detached, already. */
171 if (record_btrace_thread_observer == NULL)
172 return;
173
174 DEBUG ("detach thread observer");
175
176 observer_detach_new_thread (record_btrace_thread_observer);
177 record_btrace_thread_observer = NULL;
178}
179
70ad5bff
MM
/* The record-btrace async event handler function.  Forwards the event to
   the inferior event handling machinery.  */

static void
record_btrace_handle_async_inferior_event (gdb_client_data data)
{
  inferior_event_handler (INF_REG_EVENT, NULL);
}
187
afedecd3
MM
/* The to_open method of target record-btrace.

   ARGS, if non-empty, is a list of thread numbers to trace; otherwise all
   non-exited threads are traced.  */

static void
record_btrace_open (char *args, int from_tty)
{
  struct cleanup *disable_chain;
  struct thread_info *tp;

  DEBUG ("open");

  record_preopen ();

  if (!target_has_execution)
    error (_("The program is not being run."));

  if (!target_supports_btrace ())
    error (_("Target does not support branch tracing."));

  if (non_stop)
    error (_("Record btrace can't debug inferior in non-stop mode."));

  gdb_assert (record_btrace_thread_observer == NULL);

  /* Queue a disable cleanup per enabled thread so that tracing is torn
     down again if enabling fails part-way through.  */
  disable_chain = make_cleanup (null_cleanup, NULL);
  ALL_NON_EXITED_THREADS (tp)
    if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
      {
	btrace_enable (tp);

	make_cleanup (record_btrace_disable_callback, tp);
      }

  record_btrace_auto_enable ();

  push_target (&record_btrace_ops);

  record_btrace_async_inferior_event_handler
    = create_async_event_handler (record_btrace_handle_async_inferior_event,
				  NULL);
  record_btrace_generating_corefile = 0;

  observer_notify_record_changed (current_inferior (),  1);

  /* Everything succeeded; keep tracing enabled.  */
  discard_cleanups (disable_chain);
}
233
/* The to_stop_recording method of target record-btrace.  */

static void
record_btrace_stop_recording (struct target_ops *self)
{
  struct thread_info *tp;

  DEBUG ("stop recording");

  record_btrace_auto_disable ();

  /* Only disable threads that actually have tracing enabled.  */
  ALL_NON_EXITED_THREADS (tp)
    if (tp->btrace.target != NULL)
      btrace_disable (tp);
}
249
/* The to_close method of target record-btrace.  */

static void
record_btrace_close (struct target_ops *self)
{
  struct thread_info *tp;

  if (record_btrace_async_inferior_event_handler != NULL)
    delete_async_event_handler (&record_btrace_async_inferior_event_handler);

  /* Make sure automatic recording gets disabled even if we did not stop
     recording before closing the record-btrace target.  */
  record_btrace_auto_disable ();

  /* We should have already stopped recording.
     Tear down btrace in case we have not.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_teardown (tp);
}
269
/* The to_info_record method of target record-btrace.

   Prints the number of recorded instructions and function segments for the
   current thread, and the replay position if replaying.  */

static void
record_btrace_info (struct target_ops *self)
{
  struct btrace_thread_info *btinfo;
  struct thread_info *tp;
  unsigned int insns, calls;

  DEBUG ("info");

  tp = find_thread_ptid (inferior_ptid);
  if (tp == NULL)
    error (_("No thread."));

  btrace_fetch (tp);

  insns = 0;
  calls = 0;

  btinfo = &tp->btrace;

  if (!btrace_is_empty (tp))
    {
      struct btrace_call_iterator call;
      struct btrace_insn_iterator insn;

      /* The number of the last call/insn equals the total count, since
	 numbering starts at one.  */
      btrace_call_end (&call, btinfo);
      btrace_call_prev (&call, 1);
      calls = btrace_call_number (&call);

      btrace_insn_end (&insn, btinfo);
      btrace_insn_prev (&insn, 1);
      insns = btrace_insn_number (&insn);
    }

  printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
		       "%d (%s).\n"), insns, calls, tp->num,
		     target_pid_to_str (tp->ptid));

  if (btrace_is_replaying (tp))
    printf_unfiltered (_("Replay in progress.  At instruction %u.\n"),
		       btrace_insn_number (btinfo->replay));
}
314
/* Print an unsigned int FLD named VAL to UIOUT.  Convenience wrapper since
   ui-out has no native unsigned field printer.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
322
/* Disassemble a section of the recorded instruction trace.

   Prints instructions from BEGIN (inclusive) to END (exclusive), one per
   line, each prefixed with its instruction number.  */

static void
btrace_insn_history (struct ui_out *uiout,
		     const struct btrace_insn_iterator *begin,
		     const struct btrace_insn_iterator *end, int flags)
{
  struct gdbarch *gdbarch;
  struct btrace_insn_iterator it;

  DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
	 btrace_insn_number (end));

  gdbarch = target_gdbarch ();

  for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
    {
      const struct btrace_insn *insn;

      insn = btrace_insn_get (&it);

      /* Print the instruction index.  */
      ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
      ui_out_text (uiout, "\t");

      /* Disassembly with '/m' flag may not produce the expected result.
	 See PR gdb/11833.  */
      gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
    }
}
353
/* The to_insn_history method of target record-btrace.

   SIZE is the number of instructions to print; its sign selects the
   direction (negative = backwards).  Successive calls continue from the
   previous browsing position stored in the thread's insn_history.  */

static void
record_btrace_insn_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  btinfo = require_btrace ();
  history = btinfo->insn_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("insn-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	begin = *replay;
      else
	btrace_insn_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_insn_next (&end, 1);
	  covered += btrace_insn_prev (&begin, context - covered);
	  covered += btrace_insn_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_insn_next (&end, context);
	  covered += btrace_insn_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from where the previous request stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_insn_number (&begin), btrace_insn_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_insn_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_insn_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_insn_history (uiout, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the browsing position for the next request.  */
  btrace_set_insn_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
439
/* The to_insn_history_range method of target record-btrace.

   Prints instructions numbered FROM through TO, both inclusive.  A TO past
   the end of the trace is silently truncated.  */

static void
record_btrace_insn_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_insn_history *history;
  struct btrace_insn_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  low = from;
  high = to;

  DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Instruction numbers are unsigned int, so a
     ULONGEST argument that does not round-trip was out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_insn_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_insn_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_insn_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_insn_next (&end, 1);
    }

  btrace_insn_history (uiout, &begin, &end, flags);
  btrace_set_insn_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
492
/* The to_insn_history_from method of target record-btrace.

   Prints SIZE instructions starting (or, for negative SIZE, ending) at
   instruction number FROM, clamping at the trace boundaries.  */

static void
record_btrace_insn_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record instruction-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace instead of underflowing.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_insn_history_range (self, begin, end, flags);
}
526
/* Print the instruction number range for a function call history line.

   Prints the inclusive "begin,end" instruction numbers of BFUN's segment.
   BFUN must contain at least one instruction.  */

static void
btrace_call_history_insn_range (struct ui_out *uiout,
				const struct btrace_function *bfun)
{
  unsigned int begin, end, size;

  size = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (size > 0);

  begin = bfun->insn_offset;
  end = begin + size - 1;

  ui_out_field_uint (uiout, "insn begin", begin);
  ui_out_text (uiout, ",");
  ui_out_field_uint (uiout, "insn end", end);
}
545
/* Print the source line information for a function call history line.

   Prints "file:line" or "file:min,max" for BFUN; prints nothing if BFUN has
   no debug symbol or an invalid line range.  */

static void
btrace_call_history_src_line (struct ui_out *uiout,
			      const struct btrace_function *bfun)
{
  struct symbol *sym;
  int begin, end;

  sym = bfun->sym;
  if (sym == NULL)
    return;

  ui_out_field_string (uiout, "file",
		       symtab_to_filename_for_display (sym->symtab));

  begin = bfun->lbegin;
  end = bfun->lend;

  /* An empty/inverted range means there is no line info to print.  */
  if (end < begin)
    return;

  ui_out_text (uiout, ":");
  ui_out_field_int (uiout, "min line", begin);

  /* Only print the upper bound if the segment spans more than one line.  */
  if (end == begin)
    return;

  ui_out_text (uiout, ",");
  ui_out_field_int (uiout, "max line", end);
}
577
0b722aec
MM
578/* Get the name of a branch trace function. */
579
580static const char *
581btrace_get_bfun_name (const struct btrace_function *bfun)
582{
583 struct minimal_symbol *msym;
584 struct symbol *sym;
585
586 if (bfun == NULL)
587 return "??";
588
589 msym = bfun->msym;
590 sym = bfun->sym;
591
592 if (sym != NULL)
593 return SYMBOL_PRINT_NAME (sym);
594 else if (msym != NULL)
efd66ac6 595 return MSYMBOL_PRINT_NAME (msym);
0b722aec
MM
596 else
597 return "??";
598}
599
afedecd3
MM
/* Disassemble a section of the recorded function trace.

   Prints function segments from BEGIN (inclusive) to END (exclusive), one
   per line: index, optional call-depth indentation, function name, and
   optionally the instruction range and source lines, per FLAGS.  */

static void
btrace_call_history (struct ui_out *uiout,
		     const struct btrace_thread_info *btinfo,
		     const struct btrace_call_iterator *begin,
		     const struct btrace_call_iterator *end,
		     enum record_print_flag flags)
{
  struct btrace_call_iterator it;

  DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
	 btrace_call_number (end));

  for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
    {
      const struct btrace_function *bfun;
      struct minimal_symbol *msym;
      struct symbol *sym;

      bfun = btrace_call_get (&it);
      sym = bfun->sym;
      msym = bfun->msym;

      /* Print the function index.  */
      ui_out_field_uint (uiout, "index", bfun->number);
      ui_out_text (uiout, "\t");

      if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
	{
	  /* Indent proportionally to the call depth; btinfo->level shifts
	     the segment's relative level to a non-negative base.  */
	  int level = bfun->level + btinfo->level, i;

	  for (i = 0; i < level; ++i)
	    ui_out_text (uiout, "  ");
	}

      if (sym != NULL)
	ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
      else if (msym != NULL)
	ui_out_field_string (uiout, "function", MSYMBOL_PRINT_NAME (msym));
      else if (!ui_out_is_mi_like_p (uiout))
	ui_out_field_string (uiout, "function", "??");

      if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
	{
	  ui_out_text (uiout, _("\tinst "));
	  btrace_call_history_insn_range (uiout, bfun);
	}

      if ((flags & RECORD_PRINT_SRC_LINE) != 0)
	{
	  ui_out_text (uiout, _("\tat "));
	  btrace_call_history_src_line (uiout, bfun);
	}

      ui_out_text (uiout, "\n");
    }
}
658
/* The to_call_history method of target record-btrace.

   SIZE is the number of function segments to print; its sign selects the
   direction.  Successive calls continue from the position remembered in the
   thread's call_history.  */

static void
record_btrace_call_history (struct target_ops *self, int size, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int context, covered;

  uiout = current_uiout;
  /* NOTE(review): tuple id "insn history" looks like a copy-paste from
     record_btrace_insn_history; the range variant uses "func history".
     Left unchanged since MI consumers may depend on it -- confirm.  */
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "insn history");
  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  btinfo = require_btrace ();
  history = btinfo->call_history;
  if (history == NULL)
    {
      struct btrace_insn_iterator *replay;

      DEBUG ("call-history (0x%x): %d", flags, size);

      /* If we're replaying, we start at the replay position.  Otherwise, we
	 start at the tail of the trace.  */
      replay = btinfo->replay;
      if (replay != NULL)
	{
	  begin.function = replay->function;
	  begin.btinfo = btinfo;
	}
      else
	btrace_call_end (&begin, btinfo);

      /* We start from here and expand in the requested direction.  Then we
	 expand in the other direction, as well, to fill up any remaining
	 context.  */
      end = begin;
      if (size < 0)
	{
	  /* We want the current position covered, as well.  */
	  covered = btrace_call_next (&end, 1);
	  covered += btrace_call_prev (&begin, context - covered);
	  covered += btrace_call_next (&end, context - covered);
	}
      else
	{
	  covered = btrace_call_next (&end, context);
	  covered += btrace_call_prev (&begin, context - covered);
	}
    }
  else
    {
      /* Continue browsing from where the previous request stopped.  */
      begin = history->begin;
      end = history->end;

      DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
	     btrace_call_number (&begin), btrace_call_number (&end));

      if (size < 0)
	{
	  end = begin;
	  covered = btrace_call_prev (&begin, context);
	}
      else
	{
	  begin = end;
	  covered = btrace_call_next (&end, context);
	}
    }

  if (covered > 0)
    btrace_call_history (uiout, btinfo, &begin, &end, flags);
  else
    {
      if (size < 0)
	printf_unfiltered (_("At the start of the branch trace record.\n"));
      else
	printf_unfiltered (_("At the end of the branch trace record.\n"));
    }

  /* Remember the browsing position for the next request.  */
  btrace_set_call_history (btinfo, &begin, &end);
  do_cleanups (uiout_cleanup);
}
747
/* The to_call_history_range method of target record-btrace.

   Prints function segments numbered FROM through TO, both inclusive.  A TO
   past the end of the trace is silently truncated.  */

static void
record_btrace_call_history_range (struct target_ops *self,
				  ULONGEST from, ULONGEST to, int flags)
{
  struct btrace_thread_info *btinfo;
  struct btrace_call_history *history;
  struct btrace_call_iterator begin, end;
  struct cleanup *uiout_cleanup;
  struct ui_out *uiout;
  unsigned int low, high;
  int found;

  uiout = current_uiout;
  uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
						       "func history");
  low = from;
  high = to;

  DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);

  /* Check for wrap-arounds.  Call numbers are unsigned int, so a ULONGEST
     argument that does not round-trip was out of range.  */
  if (low != from || high != to)
    error (_("Bad range."));

  if (high < low)
    error (_("Bad range."));

  btinfo = require_btrace ();

  found = btrace_find_call_by_number (&begin, btinfo, low);
  if (found == 0)
    error (_("Range out of bounds."));

  found = btrace_find_call_by_number (&end, btinfo, high);
  if (found == 0)
    {
      /* Silently truncate the range.  */
      btrace_call_end (&end, btinfo);
    }
  else
    {
      /* We want both begin and end to be inclusive.  */
      btrace_call_next (&end, 1);
    }

  btrace_call_history (uiout, btinfo, &begin, &end, flags);
  btrace_set_call_history (btinfo, &begin, &end);

  do_cleanups (uiout_cleanup);
}
800
/* The to_call_history_from method of target record-btrace.

   Prints SIZE function segments starting (or, for negative SIZE, ending)
   at segment number FROM, clamping at the trace boundaries.  */

static void
record_btrace_call_history_from (struct target_ops *self,
				 ULONGEST from, int size, int flags)
{
  ULONGEST begin, end, context;

  context = abs (size);
  if (context == 0)
    error (_("Bad record function-call-history-size."));

  if (size < 0)
    {
      end = from;

      /* Clamp at the start of the trace instead of underflowing.  */
      if (from < context)
	begin = 0;
      else
	begin = from - context + 1;
    }
  else
    {
      begin = from;
      end = from + context - 1;

      /* Check for wrap-around.  */
      if (end < begin)
	end = ULONGEST_MAX;
    }

  record_btrace_call_history_range (self, begin, end, flags);
}
834
07bbe694
MM
/* The to_record_is_replaying method of target record-btrace.

   Returns non-zero iff any non-exited thread is currently replaying.  */

static int
record_btrace_is_replaying (struct target_ops *self)
{
  struct thread_info *tp;

  ALL_NON_EXITED_THREADS (tp)
    if (btrace_is_replaying (tp))
      return 1;

  return 0;
}
848
633785ff
MM
849/* The to_xfer_partial method of target record-btrace. */
850
9b409511 851static enum target_xfer_status
633785ff
MM
852record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
853 const char *annex, gdb_byte *readbuf,
854 const gdb_byte *writebuf, ULONGEST offset,
9b409511 855 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
856{
857 struct target_ops *t;
858
859 /* Filter out requests that don't make sense during replay. */
67b5c0c1 860 if (replay_memory_access == replay_memory_access_read_only
aef92902 861 && !record_btrace_generating_corefile
67b5c0c1 862 && record_btrace_is_replaying (ops))
633785ff
MM
863 {
864 switch (object)
865 {
866 case TARGET_OBJECT_MEMORY:
867 {
868 struct target_section *section;
869
870 /* We do not allow writing memory in general. */
871 if (writebuf != NULL)
9b409511
YQ
872 {
873 *xfered_len = len;
bc113b4e 874 return TARGET_XFER_UNAVAILABLE;
9b409511 875 }
633785ff
MM
876
877 /* We allow reading readonly memory. */
878 section = target_section_by_addr (ops, offset);
879 if (section != NULL)
880 {
881 /* Check if the section we found is readonly. */
882 if ((bfd_get_section_flags (section->the_bfd_section->owner,
883 section->the_bfd_section)
884 & SEC_READONLY) != 0)
885 {
886 /* Truncate the request to fit into this section. */
887 len = min (len, section->endaddr - offset);
888 break;
889 }
890 }
891
9b409511 892 *xfered_len = len;
bc113b4e 893 return TARGET_XFER_UNAVAILABLE;
633785ff
MM
894 }
895 }
896 }
897
898 /* Forward the request. */
e75fdfca
TT
899 ops = ops->beneath;
900 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
901 offset, len, xfered_len);
633785ff
MM
902}
903
/* The to_insert_breakpoint method of target record-btrace.  */

static int
record_btrace_insert_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Inserting breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before re-throwing any exception.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
931
/* The to_remove_breakpoint method of target record-btrace.  */

static int
record_btrace_remove_breakpoint (struct target_ops *ops,
				 struct gdbarch *gdbarch,
				 struct bp_target_info *bp_tgt)
{
  volatile struct gdb_exception except;
  const char *old;
  int ret;

  /* Removing breakpoints requires accessing memory.  Allow it for the
     duration of this function.  */
  old = replay_memory_access;
  replay_memory_access = replay_memory_access_read_write;

  ret = 0;
  TRY_CATCH (except, RETURN_MASK_ALL)
    ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);

  /* Restore the access mode before re-throwing any exception.  */
  replay_memory_access = old;

  if (except.reason < 0)
    throw_exception (except);

  return ret;
}
959
1f3ef581
MM
/* The to_fetch_registers method of target record-btrace.

   While replaying, only the PC register is available; it is supplied from
   the current replay instruction.  Otherwise the request is forwarded to
   the target beneath.  */

static void
record_btrace_fetch_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct btrace_insn_iterator *replay;
  struct thread_info *tp;

  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  replay = tp->btrace.replay;
  if (replay != NULL && !record_btrace_generating_corefile)
    {
      const struct btrace_insn *insn;
      struct gdbarch *gdbarch;
      int pcreg;

      gdbarch = get_regcache_arch (regcache);
      pcreg = gdbarch_pc_regnum (gdbarch);
      if (pcreg < 0)
	return;

      /* We can only provide the PC register.  */
      if (regno >= 0 && regno != pcreg)
	return;

      insn = btrace_insn_get (replay);
      gdb_assert (insn != NULL);

      regcache_raw_supply (regcache, regno, &insn->pc);
    }
  else
    {
      struct target_ops *t = ops->beneath;

      t->to_fetch_registers (t, regcache, regno);
    }
}
1000
/* The to_store_registers method of target record-btrace.

   Writing registers is refused while replaying (unless generating a core
   file); otherwise the request is forwarded to the target beneath.  */

static void
record_btrace_store_registers (struct target_ops *ops,
			       struct regcache *regcache, int regno)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    error (_("This record target does not allow writing registers."));

  gdb_assert (may_write_registers != 0);

  t = ops->beneath;
  t->to_store_registers (t, regcache, regno);
}
1017
/* The to_prepare_to_store method of target record-btrace.

   A no-op while replaying (stores are refused anyway); otherwise forwards
   to the target beneath.  */

static void
record_btrace_prepare_to_store (struct target_ops *ops,
				struct regcache *regcache)
{
  struct target_ops *t;

  if (!record_btrace_generating_corefile && record_btrace_is_replaying (ops))
    return;

  t = ops->beneath;
  t->to_prepare_to_store (t, regcache);
}
1032
0b722aec
MM
/* The branch trace frame cache.  Associates a frame_info with the btrace
   function segment it represents during replay unwinding.  */

struct btrace_frame_cache
{
  /* The thread.  */
  struct thread_info *tp;

  /* The frame info.  Used as the hash key; see bfcache_hash/bfcache_eq.  */
  struct frame_info *frame;

  /* The branch trace function segment.  */
  const struct btrace_function *bfun;
};

/* A struct btrace_frame_cache hash table indexed by NEXT.  */

static htab_t bfcache;
1050
/* hash_f for htab_create_alloc of bfcache.  Hashes on the frame pointer,
   matching the equality function below.  */

static hashval_t
bfcache_hash (const void *arg)
{
  const struct btrace_frame_cache *cache = arg;

  return htab_hash_pointer (cache->frame);
}
1060
/* eq_f for htab_create_alloc of bfcache.  Two entries are equal iff they
   refer to the same frame.  */

static int
bfcache_eq (const void *arg1, const void *arg2)
{
  const struct btrace_frame_cache *cache1 = arg1;
  const struct btrace_frame_cache *cache2 = arg2;

  return cache1->frame == cache2->frame;
}
1071
/* Create a new btrace frame cache for FRAME and register it in bfcache.

   The entry is obstack-allocated on the frame obstack, so its lifetime is
   tied to the frame cache.  FRAME must not already have an entry.  */

static struct btrace_frame_cache *
bfcache_new (struct frame_info *frame)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
  cache->frame = frame;

  slot = htab_find_slot (bfcache, cache, INSERT);
  gdb_assert (*slot == NULL);
  *slot = cache;

  return cache;
}
1089
1090/* Extract the branch trace function from a branch trace frame. */
1091
1092static const struct btrace_function *
1093btrace_get_frame_function (struct frame_info *frame)
1094{
1095 const struct btrace_frame_cache *cache;
1096 const struct btrace_function *bfun;
1097 struct btrace_frame_cache pattern;
1098 void **slot;
1099
1100 pattern.frame = frame;
1101
1102 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1103 if (slot == NULL)
1104 return NULL;
1105
1106 cache = *slot;
1107 return cache->bfun;
1108}
1109
cecac1ab
MM
1110/* Implement stop_reason method for record_btrace_frame_unwind. */
1111
1112static enum unwind_stop_reason
1113record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1114 void **this_cache)
1115{
0b722aec
MM
1116 const struct btrace_frame_cache *cache;
1117 const struct btrace_function *bfun;
1118
1119 cache = *this_cache;
1120 bfun = cache->bfun;
1121 gdb_assert (bfun != NULL);
1122
1123 if (bfun->up == NULL)
1124 return UNWIND_UNAVAILABLE;
1125
1126 return UNWIND_NO_REASON;
cecac1ab
MM
1127}
1128
/* Implement this_id method for record_btrace_frame_unwind.

   Builds an "unavailable stack" frame id from the frame's function
   address and the number of the first function segment of this call.  */

static void
record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
			     struct frame_id *this_id)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun;
  CORE_ADDR code, special;

  cache = *this_cache;

  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Walk back to the first segment of this function call so that every
     segment of the same call yields the same id.  */
  while (bfun->segment.prev != NULL)
    bfun = bfun->segment.prev;

  code = get_frame_func (this_frame);
  /* Use the segment number as the special address component.  */
  special = bfun->number;

  /* The stack contents are not recorded, so the id has no stack
     address.  */
  *this_id = frame_id_build_unavailable_stack_special (code, special);

  DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
	 btrace_get_bfun_name (cache->bfun),
	 core_addr_to_string_nz (this_id->code_addr),
	 core_addr_to_string_nz (this_id->special_addr));
}
1157
/* Implement prev_register method for record_btrace_frame_unwind.

   Only the PC can be unwound from the branch trace; all other
   registers are reported as unavailable.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Without a recorded caller there is nothing to unwind to.  */
  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link is a return link: the caller segment starts at the
	 return address, i.e. its first instruction.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* Otherwise the caller's last instruction is the call; the
	 unwound PC is the instruction following it.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1206
/* Implement sniffer method for record_btrace_frame_unwind.

   Claims the innermost frame when the thread is replaying, and any
   outer frame whose callee frame was created by this unwinder (except
   across tail calls, which are handled by the tailcall sniffer).  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread. */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: use the current replay position, if any.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: use the caller of the callee's segment, unless the
	 up link denotes a tail call (handled by the tailcall sniffer).  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame. Initialize the frame cache. */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1256
1257/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1258
1259static int
1260record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1261 struct frame_info *this_frame,
1262 void **this_cache)
1263{
1264 const struct btrace_function *bfun, *callee;
1265 struct btrace_frame_cache *cache;
1266 struct frame_info *next;
1267
1268 next = get_next_frame (this_frame);
1269 if (next == NULL)
1270 return 0;
1271
1272 callee = btrace_get_frame_function (next);
1273 if (callee == NULL)
1274 return 0;
1275
1276 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1277 return 0;
1278
1279 bfun = callee->up;
1280 if (bfun == NULL)
1281 return 0;
1282
1283 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1284 btrace_get_bfun_name (bfun), bfun->level);
1285
1286 /* This is our frame. Initialize the frame cache. */
1287 cache = bfcache_new (this_frame);
1288 cache->tp = find_thread_ptid (inferior_ptid);
1289 cache->bfun = bfun;
1290
1291 *this_cache = cache;
1292 return 1;
1293}
1294
/* Implement dealloc_cache method for record_btrace_frame_unwind.

   Removes SELF's entry from the BFCACHE hash table.  The cache memory
   itself was allocated on the frame obstack (see bfcache_new) and is
   released together with the frame.  */

static void
record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
{
  struct btrace_frame_cache *cache;
  void **slot;

  cache = this_cache;

  /* The entry must have been inserted by bfcache_new.  */
  slot = htab_find_slot (bfcache, cache, NO_INSERT);
  gdb_assert (slot != NULL);

  htab_remove_elt (bfcache, cache);
}
1308
/* btrace recording stores neither previous memory content nor the
   contents of the stack frames.  Any unwinding would return erroneous
   results as the stack contents no longer match the changed PC value
   restored from history.  Therefore this unwinder reports any possibly
   unwound registers as <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1325
/* The btrace unwinder for tail-call frames.  Shares all callbacks with
   record_btrace_frame_unwind except for the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1336
ac01945b
TT
1337/* Implement the to_get_unwinder method. */
1338
1339static const struct frame_unwind *
1340record_btrace_to_get_unwinder (struct target_ops *self)
1341{
1342 return &record_btrace_frame_unwind;
1343}
1344
1345/* Implement the to_get_tailcall_unwinder method. */
1346
1347static const struct frame_unwind *
1348record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1349{
1350 return &record_btrace_tailcall_frame_unwind;
1351}
1352
52834460
MM
1353/* Indicate that TP should be resumed according to FLAG. */
1354
1355static void
1356record_btrace_resume_thread (struct thread_info *tp,
1357 enum btrace_thread_flag flag)
1358{
1359 struct btrace_thread_info *btinfo;
1360
1361 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1362
1363 btinfo = &tp->btrace;
1364
1365 if ((btinfo->flags & BTHR_MOVE) != 0)
1366 error (_("Thread already moving."));
1367
1368 /* Fetch the latest branch trace. */
1369 btrace_fetch (tp);
1370
1371 btinfo->flags |= flag;
1372}
1373
1374/* Find the thread to resume given a PTID. */
1375
1376static struct thread_info *
1377record_btrace_find_resume_thread (ptid_t ptid)
1378{
1379 struct thread_info *tp;
1380
1381 /* When asked to resume everything, we pick the current thread. */
1382 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1383 ptid = inferior_ptid;
1384
1385 return find_thread_ptid (ptid);
1386}
1387
/* Start replaying a thread.

   Allocates and installs the replay iterator (positioned at the end of
   the trace, i.e. the current instruction) and fixes up the stepping
   frame ids.  Returns the iterator, or NULL if there is no trace.
   On error, cleans up and re-throws.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace. */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet. We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread. Since we need to recompute the stack, we temporarily
     set EXECUTING to false. */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying. */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind. */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's. */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace. This corresponds
	 to the current instruction. */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet. */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers. */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind. */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary. */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state. */
  set_executing (tp->ptid, executing);

  /* On error, undo the installation of the replay iterator and
     re-throw.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1474
1475/* Stop replaying a thread. */
1476
1477static void
1478record_btrace_stop_replaying (struct thread_info *tp)
1479{
1480 struct btrace_thread_info *btinfo;
1481
1482 btinfo = &tp->btrace;
1483
1484 xfree (btinfo->replay);
1485 btinfo->replay = NULL;
1486
1487 /* Make sure we're not leaving any stale registers. */
1488 registers_changed_ptid (tp->ptid);
1489}
1490
/* The to_resume method of target record-btrace.

   Forwards the request to the target beneath unless we are replaying
   (or resuming in reverse); otherwise records the requested move for
   record_btrace_wait to carry out.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  /* Store the execution direction of the last resume. */
  record_btrace_resume_exec_dir = execution_direction;

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying. */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_NON_EXITED_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request. */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_resume (ops, ptid, step, signal);
    }

  /* Compute the btrace thread flag for the requested move. */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread. We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait. */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here. The actual stepping happens in
     record_btrace_wait below. */

  /* Async support. */
  if (target_can_async_p ())
    {
      target_async (inferior_event_handler, 0);
      mark_async_event_handler (record_btrace_async_inferior_event_handler);
    }
}
1544
/* Find a thread to move.

   Prefers the thread denoted by PTID if it has a pending move request;
   otherwise returns any thread with a pending move, or NULL.  */

static struct thread_info *
record_btrace_find_thread_to_move (ptid_t ptid)
{
  struct thread_info *tp;

  /* First check the parameter thread. */
  tp = find_thread_ptid (ptid);
  if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
    return tp;

  /* Otherwise, find one other thread that has been resumed. */
  ALL_NON_EXITED_THREADS (tp)
    if ((tp->btrace.flags & BTHR_MOVE) != 0)
      return tp;

  return NULL;
}
1564
1565/* Return a target_waitstatus indicating that we ran out of history. */
1566
1567static struct target_waitstatus
1568btrace_step_no_history (void)
1569{
1570 struct target_waitstatus status;
1571
1572 status.kind = TARGET_WAITKIND_NO_HISTORY;
1573
1574 return status;
1575}
1576
1577/* Return a target_waitstatus indicating that a step finished. */
1578
1579static struct target_waitstatus
1580btrace_step_stopped (void)
1581{
1582 struct target_waitstatus status;
1583
1584 status.kind = TARGET_WAITKIND_STOPPED;
1585 status.value.sig = GDB_SIGNAL_TRAP;
1586
1587 return status;
1588}
1589
1590/* Clear the record histories. */
1591
1592static void
1593record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1594{
1595 xfree (btinfo->insn_history);
1596 xfree (btinfo->call_history);
1597
1598 btinfo->insn_history = NULL;
1599 btinfo->call_history = NULL;
1600}
1601
/* Step a single thread.

   Carries out TP's pending move request (BTHR_STEP, BTHR_RSTEP,
   BTHR_CONT, or BTHR_RCONT) on the replay iterator and returns the
   resulting wait status.  Clears the pending move flags.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  /* We can't step without an execution history. */
  if (btrace_is_empty (tp))
    return btrace_step_no_history ();

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the pending move request.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying. */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once. */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace. */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace. */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so. */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history. */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying. */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* The address space is needed for breakpoint checks below.  */
      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace. */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we hit a breakpoint or run out of trace.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once. */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace. */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so. */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Step backward until we hit a breakpoint or run out of history.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done. */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1727
/* The to_wait method of target record-btrace.

   Forwards to the target beneath unless we are replaying (or moving in
   reverse); otherwise performs one pending move via
   record_btrace_step_thread and reports the result.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request. */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      ops = ops->beneath;
      return ops->to_wait (ops, ptid, status, options);
    }

  /* Let's find a thread to move. */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      /* No pending move: nothing to report.  */
      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread. We're not able to correlate threads. */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads. */
  if (!non_stop)
    ALL_NON_EXITED_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position. */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers. */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1771
/* The to_can_execute_reverse method of target record-btrace.  Replaying
   recorded branch trace always supports reverse execution.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  return 1;
}
1779
1780/* The to_decr_pc_after_break method of target record-btrace. */
1781
1782static CORE_ADDR
1783record_btrace_decr_pc_after_break (struct target_ops *ops,
1784 struct gdbarch *gdbarch)
1785{
1786 /* When replaying, we do not actually execute the breakpoint instruction
1787 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 1788 if (record_btrace_is_replaying (ops))
52834460
MM
1789 return 0;
1790
c0eca49f 1791 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
1792}
1793
e2887aa3
MM
1794/* The to_find_new_threads method of target record-btrace. */
1795
1796static void
1797record_btrace_find_new_threads (struct target_ops *ops)
1798{
1799 /* Don't expect new threads if we're replaying. */
1c63c994 1800 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1801 return;
1802
1803 /* Forward the request. */
e75fdfca
TT
1804 ops = ops->beneath;
1805 ops->to_find_new_threads (ops);
e2887aa3
MM
1806}
1807
1808/* The to_thread_alive method of target record-btrace. */
1809
1810static int
1811record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1812{
1813 /* We don't add or remove threads during replay. */
1c63c994 1814 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1815 return find_thread_ptid (ptid) != NULL;
1816
1817 /* Forward the request. */
e75fdfca
TT
1818 ops = ops->beneath;
1819 return ops->to_thread_alive (ops, ptid);
e2887aa3
MM
1820}
1821
/* Set the replay branch trace instruction iterator. If IT is NULL, replay
   is stopped.

   Starts replaying if necessary, copies IT into the thread's replay
   iterator, and clears the record histories so they are recomputed
   from the new position.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* The replay position is unchanged; keep the histories.  */
	return;

      *btinfo->replay = *it;
      /* The replay position moved; registers must be re-fetched.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position. */
  record_btrace_clear_histories (btinfo);
}
1849
1850/* The to_goto_record_begin method of target record-btrace. */
1851
1852static void
08475817 1853record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
1854{
1855 struct thread_info *tp;
1856 struct btrace_insn_iterator begin;
1857
1858 tp = require_btrace_thread ();
1859
1860 btrace_insn_begin (&begin, &tp->btrace);
1861 record_btrace_set_replay (tp, &begin);
1862
1863 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1864}
1865
1866/* The to_goto_record_end method of target record-btrace. */
1867
1868static void
307a1b91 1869record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
1870{
1871 struct thread_info *tp;
1872
1873 tp = require_btrace_thread ();
1874
1875 record_btrace_set_replay (tp, NULL);
1876
1877 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1878}
1879
/* The to_goto_record method of target record-btrace.

   Moves the replay position to instruction number INSN, throwing if
   the number is out of range or not in the trace.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  number = insn;

  /* Check for wrap-arounds. */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1906
70ad5bff
MM
1907/* The to_execution_direction target method. */
1908
1909static enum exec_direction_kind
1910record_btrace_execution_direction (struct target_ops *self)
1911{
1912 return record_btrace_resume_exec_dir;
1913}
1914
aef92902
MM
1915/* The to_prepare_to_generate_core target method. */
1916
1917static void
1918record_btrace_prepare_to_generate_core (struct target_ops *self)
1919{
1920 record_btrace_generating_corefile = 1;
1921}
1922
1923/* The to_done_generating_core target method. */
1924
1925static void
1926record_btrace_done_generating_core (struct target_ops *self)
1927{
1928 record_btrace_generating_corefile = 0;
1929}
1930
/* Initialize the record-btrace target ops.

   Fills in the single, statically allocated record_btrace_ops target
   vector; called once from _initialize_record_btrace.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  /* Recording and history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Memory, breakpoint, and register access while replaying.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  /* Unwinding of recorded frames.  */
  ops->to_get_unwinder = &record_btrace_to_get_unwinder;
  ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_execution_direction = record_btrace_execution_direction;
  /* Core file generation.  */
  ops->to_prepare_to_generate_core = record_btrace_prepare_to_generate_core;
  ops->to_done_generating_core = record_btrace_done_generating_core;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1980
1981/* Alias for "target record". */
1982
1983static void
1984cmd_record_btrace_start (char *args, int from_tty)
1985{
1986 if (args != NULL && *args != 0)
1987 error (_("Invalid argument."));
1988
1989 execute_command ("target record-btrace", from_tty);
1990}
1991
67b5c0c1
MM
1992/* The "set record btrace" command. */
1993
1994static void
1995cmd_set_record_btrace (char *args, int from_tty)
1996{
1997 cmd_show_list (set_record_btrace_cmdlist, from_tty, "");
1998}
1999
2000/* The "show record btrace" command. */
2001
2002static void
2003cmd_show_record_btrace (char *args, int from_tty)
2004{
2005 cmd_show_list (show_record_btrace_cmdlist, from_tty, "");
2006}
2007
2008/* The "show record btrace replay-memory-access" command. */
2009
2010static void
2011cmd_show_replay_memory_access (struct ui_file *file, int from_tty,
2012 struct cmd_list_element *c, const char *value)
2013{
2014 fprintf_filtered (gdb_stdout, _("Replay memory access is %s.\n"),
2015 replay_memory_access);
2016}
2017
/* Provide a prototype to silence -Wmissing-prototypes. */
void _initialize_record_btrace (void);

/* Initialize btrace commands. */

void
_initialize_record_btrace (void)
{
  /* "record btrace" starts branch trace recording; "record b" is an
     alias.  */
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  /* "set/show record btrace" prefix commands.  */
  add_prefix_cmd ("btrace", class_support, cmd_set_record_btrace,
		  _("Set record options"), &set_record_btrace_cmdlist,
		  "set record btrace ", 0, &set_record_cmdlist);

  add_prefix_cmd ("btrace", class_support, cmd_show_record_btrace,
		  _("Show record options"), &show_record_btrace_cmdlist,
		  "show record btrace ", 0, &show_record_cmdlist);

  /* "set/show record btrace replay-memory-access".  */
  add_setshow_enum_cmd ("replay-memory-access", no_class,
			replay_memory_access_types, &replay_memory_access, _("\
Set what memory accesses are allowed during replay."), _("\
Show what memory accesses are allowed during replay."),
			   _("Default is READ-ONLY.\n\n\
The btrace record target does not trace data.\n\
The memory therefore corresponds to the live target and not \
to the current replay position.\n\n\
When READ-ONLY, allow accesses to read-only memory during replay.\n\
When READ-WRITE, allow accesses to read-only and read-write memory during \
replay."),
			   NULL, cmd_show_replay_memory_access,
			   &set_record_btrace_cmdlist,
			   &show_record_btrace_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* The frame cache used by the btrace unwinders.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}