afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "exceptions.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
afedecd3
MM
38
39/* The target_ops of record-btrace. */
40static struct target_ops record_btrace_ops;
41
42/* A new thread observer enabling branch tracing for the new thread. */
43static struct observer *record_btrace_thread_observer;
44
633785ff
MM
45/* Temporarily allow memory accesses. */
46static int record_btrace_allow_memory_access;
47
afedecd3
MM
48/* Print a record-btrace debug message. Use do ... while (0) to avoid
49 ambiguities when used in if statements. */
50
51#define DEBUG(msg, args...) \
52 do \
53 { \
54 if (record_debug != 0) \
55 fprintf_unfiltered (gdb_stdlog, \
56 "[record-btrace] " msg "\n", ##args); \
57 } \
58 while (0)
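/* Illustrative example (editor's note, not part of the original source):
   a call such as DEBUG ("resume %s", target_pid_to_str (ptid)) expands to
   the guarded fprintf_unfiltered above and, when record_debug is non-zero,
   prints "[record-btrace] resume <ptid>" to gdb_stdlog.  */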
59
60
61/* Update the branch trace for the current thread and return a pointer to its
066ce621 62 thread_info.
afedecd3
MM
63
64 Throws an error if there is no thread or no trace. This function never
65 returns NULL. */
66
066ce621
MM
67static struct thread_info *
68require_btrace_thread (void)
afedecd3
MM
69{
70 struct thread_info *tp;
afedecd3
MM
71
72 DEBUG ("require");
73
74 tp = find_thread_ptid (inferior_ptid);
75 if (tp == NULL)
76 error (_("No thread."));
77
78 btrace_fetch (tp);
79
6e07b1d2 80 if (btrace_is_empty (tp))
afedecd3
MM
81 error (_("No trace."));
82
066ce621
MM
83 return tp;
84}
85
86/* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92static struct btrace_thread_info *
93require_btrace (void)
94{
95 struct thread_info *tp;
96
97 tp = require_btrace_thread ();
98
99 return &tp->btrace;
afedecd3
MM
100}
101
102/* Enable branch tracing for one thread. Warn on errors. */
103
104static void
105record_btrace_enable_warn (struct thread_info *tp)
106{
107 volatile struct gdb_exception error;
108
109 TRY_CATCH (error, RETURN_MASK_ERROR)
110 btrace_enable (tp);
111
112 if (error.message != NULL)
113 warning ("%s", error.message);
114}
115
116/* Callback function to disable branch tracing for one thread. */
117
118static void
119record_btrace_disable_callback (void *arg)
120{
121 struct thread_info *tp;
122
123 tp = arg;
124
125 btrace_disable (tp);
126}
127
128/* Enable automatic tracing of new threads. */
129
130static void
131record_btrace_auto_enable (void)
132{
133 DEBUG ("attach thread observer");
134
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn);
137}
138
139/* Disable automatic tracing of new threads. */
140
141static void
142record_btrace_auto_disable (void)
143{
 144 /* The observer may already have been detached. */
145 if (record_btrace_thread_observer == NULL)
146 return;
147
148 DEBUG ("detach thread observer");
149
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
152}
153
154/* The to_open method of target record-btrace. */
155
156static void
157record_btrace_open (char *args, int from_tty)
158{
159 struct cleanup *disable_chain;
160 struct thread_info *tp;
161
162 DEBUG ("open");
163
8213266a 164 record_preopen ();
afedecd3
MM
165
166 if (!target_has_execution)
167 error (_("The program is not being run."));
168
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
171
52834460
MM
172 if (non_stop)
173 error (_("Record btrace can't debug inferior in non-stop mode."));
174
afedecd3
MM
175 gdb_assert (record_btrace_thread_observer == NULL);
176
177 disable_chain = make_cleanup (null_cleanup, NULL);
178 ALL_THREADS (tp)
179 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
180 {
181 btrace_enable (tp);
182
183 make_cleanup (record_btrace_disable_callback, tp);
184 }
185
186 record_btrace_auto_enable ();
187
188 push_target (&record_btrace_ops);
189
190 observer_notify_record_changed (current_inferior (), 1);
191
192 discard_cleanups (disable_chain);
193}
194
195/* The to_stop_recording method of target record-btrace. */
196
197static void
c6cd7c02 198record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
199{
200 struct thread_info *tp;
201
202 DEBUG ("stop recording");
203
204 record_btrace_auto_disable ();
205
206 ALL_THREADS (tp)
207 if (tp->btrace.target != NULL)
208 btrace_disable (tp);
209}
210
211/* The to_close method of target record-btrace. */
212
213static void
de90e03d 214record_btrace_close (struct target_ops *self)
afedecd3 215{
568e808b
MM
216 struct thread_info *tp;
217
99c819ee
MM
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
221
568e808b
MM
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
224 ALL_THREADS (tp)
225 btrace_teardown (tp);
afedecd3
MM
226}
227
228/* The to_info_record method of target record-btrace. */
229
230static void
630d6a4a 231record_btrace_info (struct target_ops *self)
afedecd3
MM
232{
233 struct btrace_thread_info *btinfo;
234 struct thread_info *tp;
23a7fe75 235 unsigned int insns, calls;
afedecd3
MM
236
237 DEBUG ("info");
238
239 tp = find_thread_ptid (inferior_ptid);
240 if (tp == NULL)
241 error (_("No thread."));
242
243 btrace_fetch (tp);
244
23a7fe75
MM
245 insns = 0;
246 calls = 0;
247
afedecd3 248 btinfo = &tp->btrace;
6e07b1d2
MM
249
250 if (!btrace_is_empty (tp))
23a7fe75
MM
251 {
252 struct btrace_call_iterator call;
253 struct btrace_insn_iterator insn;
254
255 btrace_call_end (&call, btinfo);
256 btrace_call_prev (&call, 1);
5de9129b 257 calls = btrace_call_number (&call);
23a7fe75
MM
258
259 btrace_insn_end (&insn, btinfo);
260 btrace_insn_prev (&insn, 1);
5de9129b 261 insns = btrace_insn_number (&insn);
23a7fe75 262 }
afedecd3
MM
263
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
23a7fe75 265 "%d (%s).\n"), insns, calls, tp->num,
afedecd3 266 target_pid_to_str (tp->ptid));
07bbe694
MM
267
268 if (btrace_is_replaying (tp))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo->replay));
afedecd3
MM
271}
272
273/* Print an unsigned int. */
274
275static void
276ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
277{
278 ui_out_field_fmt (uiout, fld, "%u", val);
279}
280
281/* Disassemble a section of the recorded instruction trace. */
282
283static void
23a7fe75
MM
284btrace_insn_history (struct ui_out *uiout,
285 const struct btrace_insn_iterator *begin,
286 const struct btrace_insn_iterator *end, int flags)
afedecd3
MM
287{
288 struct gdbarch *gdbarch;
23a7fe75 289 struct btrace_insn_iterator it;
afedecd3 290
23a7fe75
MM
291 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
292 btrace_insn_number (end));
afedecd3
MM
293
294 gdbarch = target_gdbarch ();
295
23a7fe75 296 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 297 {
23a7fe75
MM
298 const struct btrace_insn *insn;
299
300 insn = btrace_insn_get (&it);
301
afedecd3 302 /* Print the instruction index. */
23a7fe75 303 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
afedecd3
MM
304 ui_out_text (uiout, "\t");
305
306 /* Disassembly with '/m' flag may not produce the expected result.
307 See PR gdb/11833. */
23a7fe75 308 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
afedecd3
MM
309 }
310}
311
312/* The to_insn_history method of target record-btrace. */
313
314static void
7a6c5609 315record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
316{
317 struct btrace_thread_info *btinfo;
23a7fe75
MM
318 struct btrace_insn_history *history;
319 struct btrace_insn_iterator begin, end;
afedecd3
MM
320 struct cleanup *uiout_cleanup;
321 struct ui_out *uiout;
23a7fe75 322 unsigned int context, covered;
afedecd3
MM
323
324 uiout = current_uiout;
325 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
326 "insn history");
afedecd3 327 context = abs (size);
afedecd3
MM
328 if (context == 0)
329 error (_("Bad record instruction-history-size."));
330
23a7fe75
MM
331 btinfo = require_btrace ();
332 history = btinfo->insn_history;
333 if (history == NULL)
afedecd3 334 {
07bbe694 335 struct btrace_insn_iterator *replay;
afedecd3 336
23a7fe75 337 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 338
07bbe694
MM
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay = btinfo->replay;
342 if (replay != NULL)
343 begin = *replay;
344 else
345 btrace_insn_end (&begin, btinfo);
346
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
349 context. */
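 /* Worked example (editor's illustration, not in the original source):
 with SIZE == -10 and only four instructions preceding the start
 position, the code below first covers the current instruction
 (COVERED == 1), then extends BEGIN backwards by four (COVERED == 5),
 and finally extends END forwards by the remaining five, so up to ten
 instructions end up covered.  */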
350 end = begin;
351 if (size < 0)
352 {
353 /* We want the current position covered, as well. */
354 covered = btrace_insn_next (&end, 1);
355 covered += btrace_insn_prev (&begin, context - covered);
356 covered += btrace_insn_next (&end, context - covered);
357 }
358 else
359 {
360 covered = btrace_insn_next (&end, context);
361 covered += btrace_insn_prev (&begin, context - covered);
362 }
afedecd3
MM
363 }
364 else
365 {
23a7fe75
MM
366 begin = history->begin;
367 end = history->end;
afedecd3 368
23a7fe75
MM
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
370 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 371
23a7fe75
MM
372 if (size < 0)
373 {
374 end = begin;
375 covered = btrace_insn_prev (&begin, context);
376 }
377 else
378 {
379 begin = end;
380 covered = btrace_insn_next (&end, context);
381 }
afedecd3
MM
382 }
383
23a7fe75
MM
384 if (covered > 0)
385 btrace_insn_history (uiout, &begin, &end, flags);
386 else
387 {
388 if (size < 0)
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
390 else
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
392 }
afedecd3 393
23a7fe75 394 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
395 do_cleanups (uiout_cleanup);
396}
397
398/* The to_insn_history_range method of target record-btrace. */
399
400static void
4e99c6b7
TT
401record_btrace_insn_history_range (struct target_ops *self,
402 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
403{
404 struct btrace_thread_info *btinfo;
23a7fe75
MM
405 struct btrace_insn_history *history;
406 struct btrace_insn_iterator begin, end;
afedecd3
MM
407 struct cleanup *uiout_cleanup;
408 struct ui_out *uiout;
23a7fe75
MM
409 unsigned int low, high;
410 int found;
afedecd3
MM
411
412 uiout = current_uiout;
413 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
414 "insn history");
23a7fe75
MM
415 low = from;
416 high = to;
afedecd3 417
23a7fe75 418 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
419
420 /* Check for wrap-arounds. */
23a7fe75 421 if (low != from || high != to)
afedecd3
MM
422 error (_("Bad range."));
423
0688d04e 424 if (high < low)
afedecd3
MM
425 error (_("Bad range."));
426
23a7fe75 427 btinfo = require_btrace ();
afedecd3 428
23a7fe75
MM
429 found = btrace_find_insn_by_number (&begin, btinfo, low);
430 if (found == 0)
431 error (_("Range out of bounds."));
afedecd3 432
23a7fe75
MM
433 found = btrace_find_insn_by_number (&end, btinfo, high);
434 if (found == 0)
0688d04e
MM
435 {
436 /* Silently truncate the range. */
437 btrace_insn_end (&end, btinfo);
438 }
439 else
440 {
441 /* We want both begin and end to be inclusive. */
442 btrace_insn_next (&end, 1);
443 }
afedecd3 444
23a7fe75
MM
445 btrace_insn_history (uiout, &begin, &end, flags);
446 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
447
448 do_cleanups (uiout_cleanup);
449}
450
451/* The to_insn_history_from method of target record-btrace. */
452
453static void
9abc3ff3
TT
454record_btrace_insn_history_from (struct target_ops *self,
455 ULONGEST from, int size, int flags)
afedecd3
MM
456{
457 ULONGEST begin, end, context;
458
459 context = abs (size);
0688d04e
MM
460 if (context == 0)
461 error (_("Bad record instruction-history-size."));
afedecd3
MM
462
463 if (size < 0)
464 {
465 end = from;
466
467 if (from < context)
468 begin = 0;
469 else
0688d04e 470 begin = from - context + 1;
afedecd3
MM
471 }
472 else
473 {
474 begin = from;
0688d04e 475 end = from + context - 1;
afedecd3
MM
476
477 /* Check for wrap-around. */
478 if (end < begin)
479 end = ULONGEST_MAX;
480 }
481
4e99c6b7 482 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
483}
484
485/* Print the instruction number range for a function call history line. */
486
487static void
23a7fe75
MM
488btrace_call_history_insn_range (struct ui_out *uiout,
489 const struct btrace_function *bfun)
afedecd3 490{
7acbe133
MM
491 unsigned int begin, end, size;
492
493 size = VEC_length (btrace_insn_s, bfun->insn);
494 gdb_assert (size > 0);
afedecd3 495
23a7fe75 496 begin = bfun->insn_offset;
7acbe133 497 end = begin + size - 1;
afedecd3 498
23a7fe75 499 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 500 ui_out_text (uiout, ",");
23a7fe75 501 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
502}
503
504/* Print the source line information for a function call history line. */
505
506static void
23a7fe75
MM
507btrace_call_history_src_line (struct ui_out *uiout,
508 const struct btrace_function *bfun)
afedecd3
MM
509{
510 struct symbol *sym;
23a7fe75 511 int begin, end;
afedecd3
MM
512
513 sym = bfun->sym;
514 if (sym == NULL)
515 return;
516
517 ui_out_field_string (uiout, "file",
518 symtab_to_filename_for_display (sym->symtab));
519
23a7fe75
MM
520 begin = bfun->lbegin;
521 end = bfun->lend;
522
523 if (end < begin)
afedecd3
MM
524 return;
525
526 ui_out_text (uiout, ":");
23a7fe75 527 ui_out_field_int (uiout, "min line", begin);
afedecd3 528
23a7fe75 529 if (end == begin)
afedecd3
MM
530 return;
531
8710b709 532 ui_out_text (uiout, ",");
23a7fe75 533 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
534}
535
0b722aec
MM
536/* Get the name of a branch trace function. */
537
538static const char *
539btrace_get_bfun_name (const struct btrace_function *bfun)
540{
541 struct minimal_symbol *msym;
542 struct symbol *sym;
543
544 if (bfun == NULL)
545 return "??";
546
547 msym = bfun->msym;
548 sym = bfun->sym;
549
550 if (sym != NULL)
551 return SYMBOL_PRINT_NAME (sym);
552 else if (msym != NULL)
553 return SYMBOL_PRINT_NAME (msym);
554 else
555 return "??";
556}
557
afedecd3
MM
558/* Disassemble a section of the recorded function trace. */
559
560static void
23a7fe75 561btrace_call_history (struct ui_out *uiout,
8710b709 562 const struct btrace_thread_info *btinfo,
23a7fe75
MM
563 const struct btrace_call_iterator *begin,
564 const struct btrace_call_iterator *end,
afedecd3
MM
565 enum record_print_flag flags)
566{
23a7fe75 567 struct btrace_call_iterator it;
afedecd3 568
23a7fe75
MM
569 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
570 btrace_call_number (end));
afedecd3 571
23a7fe75 572 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 573 {
23a7fe75
MM
574 const struct btrace_function *bfun;
575 struct minimal_symbol *msym;
576 struct symbol *sym;
577
578 bfun = btrace_call_get (&it);
23a7fe75 579 sym = bfun->sym;
0b722aec 580 msym = bfun->msym;
23a7fe75 581
afedecd3 582 /* Print the function index. */
23a7fe75 583 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
584 ui_out_text (uiout, "\t");
585
8710b709
MM
586 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
587 {
588 int level = bfun->level + btinfo->level, i;
589
590 for (i = 0; i < level; ++i)
591 ui_out_text (uiout, " ");
592 }
593
594 if (sym != NULL)
595 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
596 else if (msym != NULL)
597 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
598 else if (!ui_out_is_mi_like_p (uiout))
599 ui_out_field_string (uiout, "function", "??");
600
1e038f67 601 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 602 {
8710b709 603 ui_out_text (uiout, _("\tinst "));
23a7fe75 604 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
605 }
606
1e038f67 607 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 608 {
8710b709 609 ui_out_text (uiout, _("\tat "));
23a7fe75 610 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
611 }
612
afedecd3
MM
613 ui_out_text (uiout, "\n");
614 }
615}
616
617/* The to_call_history method of target record-btrace. */
618
619static void
5df2fcba 620record_btrace_call_history (struct target_ops *self, int size, int flags)
afedecd3
MM
621{
622 struct btrace_thread_info *btinfo;
23a7fe75
MM
623 struct btrace_call_history *history;
624 struct btrace_call_iterator begin, end;
afedecd3
MM
625 struct cleanup *uiout_cleanup;
626 struct ui_out *uiout;
23a7fe75 627 unsigned int context, covered;
afedecd3
MM
628
629 uiout = current_uiout;
630 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
631 "insn history");
afedecd3 632 context = abs (size);
afedecd3
MM
633 if (context == 0)
634 error (_("Bad record function-call-history-size."));
635
23a7fe75
MM
636 btinfo = require_btrace ();
637 history = btinfo->call_history;
638 if (history == NULL)
afedecd3 639 {
07bbe694 640 struct btrace_insn_iterator *replay;
afedecd3 641
23a7fe75 642 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 643
07bbe694
MM
644 /* If we're replaying, we start at the replay position. Otherwise, we
645 start at the tail of the trace. */
646 replay = btinfo->replay;
647 if (replay != NULL)
648 {
649 begin.function = replay->function;
650 begin.btinfo = btinfo;
651 }
652 else
653 btrace_call_end (&begin, btinfo);
654
655 /* We start from here and expand in the requested direction. Then we
656 expand in the other direction, as well, to fill up any remaining
657 context. */
658 end = begin;
659 if (size < 0)
660 {
661 /* We want the current position covered, as well. */
662 covered = btrace_call_next (&end, 1);
663 covered += btrace_call_prev (&begin, context - covered);
664 covered += btrace_call_next (&end, context - covered);
665 }
666 else
667 {
668 covered = btrace_call_next (&end, context);
 669 covered += btrace_call_prev (&begin, context - covered);
670 }
afedecd3
MM
671 }
672 else
673 {
23a7fe75
MM
674 begin = history->begin;
675 end = history->end;
afedecd3 676
23a7fe75
MM
677 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
678 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 679
23a7fe75
MM
680 if (size < 0)
681 {
682 end = begin;
683 covered = btrace_call_prev (&begin, context);
684 }
685 else
686 {
687 begin = end;
688 covered = btrace_call_next (&end, context);
689 }
afedecd3
MM
690 }
691
23a7fe75 692 if (covered > 0)
8710b709 693 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
694 else
695 {
696 if (size < 0)
697 printf_unfiltered (_("At the start of the branch trace record.\n"));
698 else
699 printf_unfiltered (_("At the end of the branch trace record.\n"));
700 }
afedecd3 701
23a7fe75 702 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
703 do_cleanups (uiout_cleanup);
704}
705
706/* The to_call_history_range method of target record-btrace. */
707
708static void
f0d960ea
TT
709record_btrace_call_history_range (struct target_ops *self,
710 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
711{
712 struct btrace_thread_info *btinfo;
23a7fe75
MM
713 struct btrace_call_history *history;
714 struct btrace_call_iterator begin, end;
afedecd3
MM
715 struct cleanup *uiout_cleanup;
716 struct ui_out *uiout;
23a7fe75
MM
717 unsigned int low, high;
718 int found;
afedecd3
MM
719
720 uiout = current_uiout;
721 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
722 "func history");
23a7fe75
MM
723 low = from;
724 high = to;
afedecd3 725
23a7fe75 726 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
727
728 /* Check for wrap-arounds. */
23a7fe75 729 if (low != from || high != to)
afedecd3
MM
730 error (_("Bad range."));
731
0688d04e 732 if (high < low)
afedecd3
MM
733 error (_("Bad range."));
734
23a7fe75 735 btinfo = require_btrace ();
afedecd3 736
23a7fe75
MM
737 found = btrace_find_call_by_number (&begin, btinfo, low);
738 if (found == 0)
739 error (_("Range out of bounds."));
afedecd3 740
23a7fe75
MM
741 found = btrace_find_call_by_number (&end, btinfo, high);
742 if (found == 0)
0688d04e
MM
743 {
744 /* Silently truncate the range. */
745 btrace_call_end (&end, btinfo);
746 }
747 else
748 {
749 /* We want both begin and end to be inclusive. */
750 btrace_call_next (&end, 1);
751 }
afedecd3 752
8710b709 753 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 754 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
755
756 do_cleanups (uiout_cleanup);
757}
758
759/* The to_call_history_from method of target record-btrace. */
760
761static void
ec0aea04
TT
762record_btrace_call_history_from (struct target_ops *self,
763 ULONGEST from, int size, int flags)
afedecd3
MM
764{
765 ULONGEST begin, end, context;
766
767 context = abs (size);
0688d04e
MM
768 if (context == 0)
769 error (_("Bad record function-call-history-size."));
afedecd3
MM
770
771 if (size < 0)
772 {
773 end = from;
774
775 if (from < context)
776 begin = 0;
777 else
0688d04e 778 begin = from - context + 1;
afedecd3
MM
779 }
780 else
781 {
782 begin = from;
0688d04e 783 end = from + context - 1;
afedecd3
MM
784
785 /* Check for wrap-around. */
786 if (end < begin)
787 end = ULONGEST_MAX;
788 }
789
f0d960ea 790 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
791}
792
07bbe694
MM
793/* The to_record_is_replaying method of target record-btrace. */
794
795static int
1c63c994 796record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
797{
798 struct thread_info *tp;
799
800 ALL_THREADS (tp)
801 if (btrace_is_replaying (tp))
802 return 1;
803
804 return 0;
805}
806
633785ff
MM
807/* The to_xfer_partial method of target record-btrace. */
808
9b409511 809static enum target_xfer_status
633785ff
MM
810record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
811 const char *annex, gdb_byte *readbuf,
812 const gdb_byte *writebuf, ULONGEST offset,
9b409511 813 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
814{
815 struct target_ops *t;
816
817 /* Filter out requests that don't make sense during replay. */
1c63c994 818 if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
633785ff
MM
819 {
820 switch (object)
821 {
822 case TARGET_OBJECT_MEMORY:
823 {
824 struct target_section *section;
825
826 /* We do not allow writing memory in general. */
827 if (writebuf != NULL)
9b409511
YQ
828 {
829 *xfered_len = len;
830 return TARGET_XFER_E_UNAVAILABLE;
831 }
633785ff
MM
832
833 /* We allow reading readonly memory. */
834 section = target_section_by_addr (ops, offset);
835 if (section != NULL)
836 {
837 /* Check if the section we found is readonly. */
838 if ((bfd_get_section_flags (section->the_bfd_section->owner,
839 section->the_bfd_section)
840 & SEC_READONLY) != 0)
841 {
842 /* Truncate the request to fit into this section. */
843 len = min (len, section->endaddr - offset);
844 break;
845 }
846 }
847
9b409511 848 *xfered_len = len;
633785ff
MM
849 return TARGET_XFER_E_UNAVAILABLE;
850 }
851 }
852 }
853
854 /* Forward the request. */
855 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
856 if (ops->to_xfer_partial != NULL)
857 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 858 offset, len, xfered_len);
633785ff 859
9b409511 860 *xfered_len = len;
633785ff
MM
861 return TARGET_XFER_E_UNAVAILABLE;
862}
863
864/* The to_insert_breakpoint method of target record-btrace. */
865
866static int
867record_btrace_insert_breakpoint (struct target_ops *ops,
868 struct gdbarch *gdbarch,
869 struct bp_target_info *bp_tgt)
870{
871 volatile struct gdb_exception except;
872 int old, ret;
873
874 /* Inserting breakpoints requires accessing memory. Allow it for the
875 duration of this function. */
876 old = record_btrace_allow_memory_access;
877 record_btrace_allow_memory_access = 1;
878
879 ret = 0;
880 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 881 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff
MM
882
883 record_btrace_allow_memory_access = old;
884
885 if (except.reason < 0)
886 throw_exception (except);
887
888 return ret;
889}
890
891/* The to_remove_breakpoint method of target record-btrace. */
892
893static int
894record_btrace_remove_breakpoint (struct target_ops *ops,
895 struct gdbarch *gdbarch,
896 struct bp_target_info *bp_tgt)
897{
898 volatile struct gdb_exception except;
899 int old, ret;
900
901 /* Removing breakpoints requires accessing memory. Allow it for the
902 duration of this function. */
903 old = record_btrace_allow_memory_access;
904 record_btrace_allow_memory_access = 1;
905
906 ret = 0;
907 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 908 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff
MM
909
910 record_btrace_allow_memory_access = old;
911
912 if (except.reason < 0)
913 throw_exception (except);
914
915 return ret;
916}
917
1f3ef581
MM
918/* The to_fetch_registers method of target record-btrace. */
919
920static void
921record_btrace_fetch_registers (struct target_ops *ops,
922 struct regcache *regcache, int regno)
923{
924 struct btrace_insn_iterator *replay;
925 struct thread_info *tp;
926
927 tp = find_thread_ptid (inferior_ptid);
928 gdb_assert (tp != NULL);
929
930 replay = tp->btrace.replay;
931 if (replay != NULL)
932 {
933 const struct btrace_insn *insn;
934 struct gdbarch *gdbarch;
935 int pcreg;
936
937 gdbarch = get_regcache_arch (regcache);
938 pcreg = gdbarch_pc_regnum (gdbarch);
939 if (pcreg < 0)
940 return;
941
942 /* We can only provide the PC register. */
943 if (regno >= 0 && regno != pcreg)
944 return;
945
946 insn = btrace_insn_get (replay);
947 gdb_assert (insn != NULL);
948
949 regcache_raw_supply (regcache, regno, &insn->pc);
950 }
951 else
952 {
953 struct target_ops *t;
954
955 for (t = ops->beneath; t != NULL; t = t->beneath)
956 if (t->to_fetch_registers != NULL)
957 {
958 t->to_fetch_registers (t, regcache, regno);
959 break;
960 }
961 }
962}
963
964/* The to_store_registers method of target record-btrace. */
965
966static void
967record_btrace_store_registers (struct target_ops *ops,
968 struct regcache *regcache, int regno)
969{
970 struct target_ops *t;
971
1c63c994 972 if (record_btrace_is_replaying (ops))
1f3ef581
MM
973 error (_("This record target does not allow writing registers."));
974
975 gdb_assert (may_write_registers != 0);
976
977 for (t = ops->beneath; t != NULL; t = t->beneath)
978 if (t->to_store_registers != NULL)
979 {
980 t->to_store_registers (t, regcache, regno);
981 return;
982 }
983
984 noprocess ();
985}
986
987/* The to_prepare_to_store method of target record-btrace. */
988
989static void
990record_btrace_prepare_to_store (struct target_ops *ops,
991 struct regcache *regcache)
992{
993 struct target_ops *t;
994
1c63c994 995 if (record_btrace_is_replaying (ops))
1f3ef581
MM
996 return;
997
998 for (t = ops->beneath; t != NULL; t = t->beneath)
999 if (t->to_prepare_to_store != NULL)
1000 {
1001 t->to_prepare_to_store (t, regcache);
1002 return;
1003 }
1004}
1005
0b722aec
MM
1006/* The branch trace frame cache. */
1007
1008struct btrace_frame_cache
1009{
1010 /* The thread. */
1011 struct thread_info *tp;
1012
1013 /* The frame info. */
1014 struct frame_info *frame;
1015
1016 /* The branch trace function segment. */
1017 const struct btrace_function *bfun;
1018};
1019
1020/* A struct btrace_frame_cache hash table indexed by NEXT. */
1021
1022static htab_t bfcache;
1023
1024/* hash_f for htab_create_alloc of bfcache. */
1025
1026static hashval_t
1027bfcache_hash (const void *arg)
1028{
1029 const struct btrace_frame_cache *cache = arg;
1030
1031 return htab_hash_pointer (cache->frame);
1032}
1033
1034/* eq_f for htab_create_alloc of bfcache. */
1035
1036static int
1037bfcache_eq (const void *arg1, const void *arg2)
1038{
1039 const struct btrace_frame_cache *cache1 = arg1;
1040 const struct btrace_frame_cache *cache2 = arg2;
1041
1042 return cache1->frame == cache2->frame;
1043}
1044
1045/* Create a new btrace frame cache. */
1046
1047static struct btrace_frame_cache *
1048bfcache_new (struct frame_info *frame)
1049{
1050 struct btrace_frame_cache *cache;
1051 void **slot;
1052
1053 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1054 cache->frame = frame;
1055
1056 slot = htab_find_slot (bfcache, cache, INSERT);
1057 gdb_assert (*slot == NULL);
1058 *slot = cache;
1059
1060 return cache;
1061}
1062
1063/* Extract the branch trace function from a branch trace frame. */
1064
1065static const struct btrace_function *
1066btrace_get_frame_function (struct frame_info *frame)
1067{
1068 const struct btrace_frame_cache *cache;
1069 const struct btrace_function *bfun;
1070 struct btrace_frame_cache pattern;
1071 void **slot;
1072
1073 pattern.frame = frame;
1074
1075 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1076 if (slot == NULL)
1077 return NULL;
1078
1079 cache = *slot;
1080 return cache->bfun;
1081}
1082
cecac1ab
MM
1083/* Implement stop_reason method for record_btrace_frame_unwind. */
1084
1085static enum unwind_stop_reason
1086record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1087 void **this_cache)
1088{
0b722aec
MM
1089 const struct btrace_frame_cache *cache;
1090 const struct btrace_function *bfun;
1091
1092 cache = *this_cache;
1093 bfun = cache->bfun;
1094 gdb_assert (bfun != NULL);
1095
1096 if (bfun->up == NULL)
1097 return UNWIND_UNAVAILABLE;
1098
1099 return UNWIND_NO_REASON;
cecac1ab
MM
1100}
1101
1102/* Implement this_id method for record_btrace_frame_unwind. */
1103
1104static void
1105record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1106 struct frame_id *this_id)
1107{
0b722aec
MM
1108 const struct btrace_frame_cache *cache;
1109 const struct btrace_function *bfun;
1110 CORE_ADDR code, special;
1111
1112 cache = *this_cache;
1113
1114 bfun = cache->bfun;
1115 gdb_assert (bfun != NULL);
1116
1117 while (bfun->segment.prev != NULL)
1118 bfun = bfun->segment.prev;
1119
1120 code = get_frame_func (this_frame);
1121 special = bfun->number;
1122
1123 *this_id = frame_id_build_unavailable_stack_special (code, special);
1124
1125 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1126 btrace_get_bfun_name (cache->bfun),
1127 core_addr_to_string_nz (this_id->code_addr),
1128 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1129}
1130
1131/* Implement prev_register method for record_btrace_frame_unwind. */
1132
1133static struct value *
1134record_btrace_frame_prev_register (struct frame_info *this_frame,
1135 void **this_cache,
1136 int regnum)
1137{
0b722aec
MM
1138 const struct btrace_frame_cache *cache;
1139 const struct btrace_function *bfun, *caller;
1140 const struct btrace_insn *insn;
1141 struct gdbarch *gdbarch;
1142 CORE_ADDR pc;
1143 int pcreg;
1144
1145 gdbarch = get_frame_arch (this_frame);
1146 pcreg = gdbarch_pc_regnum (gdbarch);
1147 if (pcreg < 0 || regnum != pcreg)
1148 throw_error (NOT_AVAILABLE_ERROR,
1149 _("Registers are not available in btrace record history"));
1150
1151 cache = *this_cache;
1152 bfun = cache->bfun;
1153 gdb_assert (bfun != NULL);
1154
1155 caller = bfun->up;
1156 if (caller == NULL)
1157 throw_error (NOT_AVAILABLE_ERROR,
1158 _("No caller in btrace record history"));
1159
1160 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1161 {
1162 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1163 pc = insn->pc;
1164 }
1165 else
1166 {
1167 insn = VEC_last (btrace_insn_s, caller->insn);
1168 pc = insn->pc;
1169
1170 pc += gdb_insn_length (gdbarch, pc);
1171 }
1172
1173 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1174 btrace_get_bfun_name (bfun), bfun->level,
1175 core_addr_to_string_nz (pc));
1176
1177 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1178}
1179
1180/* Implement sniffer method for record_btrace_frame_unwind. */
1181
1182static int
1183record_btrace_frame_sniffer (const struct frame_unwind *self,
1184 struct frame_info *this_frame,
1185 void **this_cache)
1186{
0b722aec
MM
1187 const struct btrace_function *bfun;
1188 struct btrace_frame_cache *cache;
cecac1ab 1189 struct thread_info *tp;
0b722aec 1190 struct frame_info *next;
cecac1ab
MM
1191
1192 /* THIS_FRAME does not contain a reference to its thread. */
1193 tp = find_thread_ptid (inferior_ptid);
1194 gdb_assert (tp != NULL);
1195
0b722aec
MM
1196 bfun = NULL;
1197 next = get_next_frame (this_frame);
1198 if (next == NULL)
1199 {
1200 const struct btrace_insn_iterator *replay;
1201
1202 replay = tp->btrace.replay;
1203 if (replay != NULL)
1204 bfun = replay->function;
1205 }
1206 else
1207 {
1208 const struct btrace_function *callee;
1209
1210 callee = btrace_get_frame_function (next);
1211 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1212 bfun = callee->up;
1213 }
1214
1215 if (bfun == NULL)
1216 return 0;
1217
1218 DEBUG ("[frame] sniffed frame for %s on level %d",
1219 btrace_get_bfun_name (bfun), bfun->level);
1220
1221 /* This is our frame. Initialize the frame cache. */
1222 cache = bfcache_new (this_frame);
1223 cache->tp = tp;
1224 cache->bfun = bfun;
1225
1226 *this_cache = cache;
1227 return 1;
1228}
1229
1230/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1231
1232static int
1233record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1234 struct frame_info *this_frame,
1235 void **this_cache)
1236{
1237 const struct btrace_function *bfun, *callee;
1238 struct btrace_frame_cache *cache;
1239 struct frame_info *next;
1240
1241 next = get_next_frame (this_frame);
1242 if (next == NULL)
1243 return 0;
1244
1245 callee = btrace_get_frame_function (next);
1246 if (callee == NULL)
1247 return 0;
1248
1249 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1250 return 0;
1251
1252 bfun = callee->up;
1253 if (bfun == NULL)
1254 return 0;
1255
1256 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1257 btrace_get_bfun_name (bfun), bfun->level);
1258
1259 /* This is our frame. Initialize the frame cache. */
1260 cache = bfcache_new (this_frame);
1261 cache->tp = find_thread_ptid (inferior_ptid);
1262 cache->bfun = bfun;
1263
1264 *this_cache = cache;
1265 return 1;
1266}
1267
1268static void
1269record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1270{
1271 struct btrace_frame_cache *cache;
1272 void **slot;
1273
1274 cache = this_cache;
1275
1276 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1277 gdb_assert (slot != NULL);
1278
1279 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1280}
1281
 1282/* btrace recording does not store previous memory contents, nor the stack
 1283 frame contents. Any unwinding would return erroneous results as the stack
 1284 contents no longer match the changed PC value restored from history.
1285 Therefore this unwinder reports any possibly unwound registers as
1286 <unavailable>. */
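/* Editor's note (illustrative, not in the original source): together with
   record_btrace_fetch_registers above, which supplies only the PC while
   replaying, this means GDB can still show where replayed execution is,
   while any other register requested from an unwound frame is reported
   as not available.  */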
1287
0b722aec 1288const struct frame_unwind record_btrace_frame_unwind =
cecac1ab
MM
1289{
1290 NORMAL_FRAME,
1291 record_btrace_frame_unwind_stop_reason,
1292 record_btrace_frame_this_id,
1293 record_btrace_frame_prev_register,
1294 NULL,
0b722aec
MM
1295 record_btrace_frame_sniffer,
1296 record_btrace_frame_dealloc_cache
1297};
1298
1299const struct frame_unwind record_btrace_tailcall_frame_unwind =
1300{
1301 TAILCALL_FRAME,
1302 record_btrace_frame_unwind_stop_reason,
1303 record_btrace_frame_this_id,
1304 record_btrace_frame_prev_register,
1305 NULL,
1306 record_btrace_tailcall_frame_sniffer,
1307 record_btrace_frame_dealloc_cache
cecac1ab 1308};
b2f4cfde 1309
ac01945b
TT
1310/* Implement the to_get_unwinder method. */
1311
1312static const struct frame_unwind *
1313record_btrace_to_get_unwinder (struct target_ops *self)
1314{
1315 return &record_btrace_frame_unwind;
1316}
1317
1318/* Implement the to_get_tailcall_unwinder method. */
1319
1320static const struct frame_unwind *
1321record_btrace_to_get_tailcall_unwinder (struct target_ops *self)
1322{
1323 return &record_btrace_tailcall_frame_unwind;
1324}
1325
52834460
MM
1326/* Indicate that TP should be resumed according to FLAG. */
1327
1328static void
1329record_btrace_resume_thread (struct thread_info *tp,
1330 enum btrace_thread_flag flag)
1331{
1332 struct btrace_thread_info *btinfo;
1333
1334 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1335
1336 btinfo = &tp->btrace;
1337
1338 if ((btinfo->flags & BTHR_MOVE) != 0)
1339 error (_("Thread already moving."));
1340
1341 /* Fetch the latest branch trace. */
1342 btrace_fetch (tp);
1343
1344 btinfo->flags |= flag;
1345}
1346
1347/* Find the thread to resume given a PTID. */
1348
1349static struct thread_info *
1350record_btrace_find_resume_thread (ptid_t ptid)
1351{
1352 struct thread_info *tp;
1353
1354 /* When asked to resume everything, we pick the current thread. */
1355 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1356 ptid = inferior_ptid;
1357
1358 return find_thread_ptid (ptid);
1359}
1360
1361/* Start replaying a thread. */
1362
1363static struct btrace_insn_iterator *
1364record_btrace_start_replaying (struct thread_info *tp)
1365{
1366 volatile struct gdb_exception except;
1367 struct btrace_insn_iterator *replay;
1368 struct btrace_thread_info *btinfo;
1369 int executing;
1370
1371 btinfo = &tp->btrace;
1372 replay = NULL;
1373
1374 /* We can't start replaying without trace. */
1375 if (btinfo->begin == NULL)
1376 return NULL;
1377
1378 /* Clear the executing flag to allow changes to the current frame.
1379 We are not actually running, yet. We just started a reverse execution
1380 command or a record goto command.
1381 For the latter, EXECUTING is false and this has no effect.
1382 For the former, EXECUTING is true and we're in to_wait, about to
1383 move the thread. Since we need to recompute the stack, we temporarily
 1384 set EXECUTING to false. */
1385 executing = is_executing (tp->ptid);
1386 set_executing (tp->ptid, 0);
1387
 1388 /* GDB stores the current frame_id when stepping in order to detect steps
1389 into subroutines.
1390 Since frames are computed differently when we're replaying, we need to
1391 recompute those stored frames and fix them up so we can still detect
1392 subroutines after we started replaying. */
1393 TRY_CATCH (except, RETURN_MASK_ALL)
1394 {
1395 struct frame_info *frame;
1396 struct frame_id frame_id;
1397 int upd_step_frame_id, upd_step_stack_frame_id;
1398
1399 /* The current frame without replaying - computed via normal unwind. */
1400 frame = get_current_frame ();
1401 frame_id = get_frame_id (frame);
1402
1403 /* Check if we need to update any stepping-related frame id's. */
1404 upd_step_frame_id = frame_id_eq (frame_id,
1405 tp->control.step_frame_id);
1406 upd_step_stack_frame_id = frame_id_eq (frame_id,
1407 tp->control.step_stack_frame_id);
1408
1409 /* We start replaying at the end of the branch trace. This corresponds
1410 to the current instruction. */
1411 replay = xmalloc (sizeof (*replay));
1412 btrace_insn_end (replay, btinfo);
1413
1414 /* We're not replaying, yet. */
1415 gdb_assert (btinfo->replay == NULL);
1416 btinfo->replay = replay;
1417
1418 /* Make sure we're not using any stale registers. */
1419 registers_changed_ptid (tp->ptid);
1420
1421 /* The current frame with replaying - computed via btrace unwind. */
1422 frame = get_current_frame ();
1423 frame_id = get_frame_id (frame);
1424
1425 /* Replace stepping related frames where necessary. */
1426 if (upd_step_frame_id)
1427 tp->control.step_frame_id = frame_id;
1428 if (upd_step_stack_frame_id)
1429 tp->control.step_stack_frame_id = frame_id;
1430 }
1431
1432 /* Restore the previous execution state. */
1433 set_executing (tp->ptid, executing);
1434
1435 if (except.reason < 0)
1436 {
1437 xfree (btinfo->replay);
1438 btinfo->replay = NULL;
1439
1440 registers_changed_ptid (tp->ptid);
1441
1442 throw_exception (except);
1443 }
1444
1445 return replay;
1446}
1447
1448/* Stop replaying a thread. */
1449
1450static void
1451record_btrace_stop_replaying (struct thread_info *tp)
1452{
1453 struct btrace_thread_info *btinfo;
1454
1455 btinfo = &tp->btrace;
1456
1457 xfree (btinfo->replay);
1458 btinfo->replay = NULL;
1459
1460 /* Make sure we're not leaving any stale registers. */
1461 registers_changed_ptid (tp->ptid);
1462}
1463
b2f4cfde
MM
1464/* The to_resume method of target record-btrace. */
1465
1466static void
1467record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
1468 enum gdb_signal signal)
1469{
52834460
MM
1470 struct thread_info *tp, *other;
1471 enum btrace_thread_flag flag;
1472
1473 DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");
1474
1475 tp = record_btrace_find_resume_thread (ptid);
1476 if (tp == NULL)
1477 error (_("Cannot find thread to resume."));
1478
1479 /* Stop replaying other threads if the thread to resume is not replaying. */
1480 if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
1481 ALL_THREADS (other)
1482 record_btrace_stop_replaying (other);
1483
b2f4cfde 1484 /* As long as we're not replaying, just forward the request. */
1c63c994 1485 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
b2f4cfde
MM
1486 {
1487 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1488 if (ops->to_resume != NULL)
1489 return ops->to_resume (ops, ptid, step, signal);
1490
1491 error (_("Cannot find target for stepping."));
1492 }
1493
52834460
MM
1494 /* Compute the btrace thread flag for the requested move. */
1495 if (step == 0)
1496 flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
1497 else
1498 flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;
1499
1500 /* At the moment, we only move a single thread. We could also move
1501 all threads in parallel by single-stepping each resumed thread
1502 until the first runs into an event.
1503 When we do that, we would want to continue all other threads.
1504 For now, just resume one thread to not confuse to_wait. */
1505 record_btrace_resume_thread (tp, flag);
1506
1507 /* We just indicate the resume intent here. The actual stepping happens in
1508 record_btrace_wait below. */
1509}
1510
1511/* Find a thread to move. */
1512
1513static struct thread_info *
1514record_btrace_find_thread_to_move (ptid_t ptid)
1515{
1516 struct thread_info *tp;
1517
1518 /* First check the parameter thread. */
1519 tp = find_thread_ptid (ptid);
1520 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1521 return tp;
1522
1523 /* Otherwise, find one other thread that has been resumed. */
1524 ALL_THREADS (tp)
1525 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1526 return tp;
1527
1528 return NULL;
1529}
1530
1531/* Return a target_waitstatus indicating that we ran out of history. */
1532
1533static struct target_waitstatus
1534btrace_step_no_history (void)
1535{
1536 struct target_waitstatus status;
1537
1538 status.kind = TARGET_WAITKIND_NO_HISTORY;
1539
1540 return status;
1541}
1542
1543/* Return a target_waitstatus indicating that a step finished. */
1544
1545static struct target_waitstatus
1546btrace_step_stopped (void)
1547{
1548 struct target_waitstatus status;
1549
1550 status.kind = TARGET_WAITKIND_STOPPED;
1551 status.value.sig = GDB_SIGNAL_TRAP;
1552
1553 return status;
1554}
1555
1556/* Clear the record histories. */
1557
1558static void
1559record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1560{
1561 xfree (btinfo->insn_history);
1562 xfree (btinfo->call_history);
1563
1564 btinfo->insn_history = NULL;
1565 btinfo->call_history = NULL;
1566}
1567
1568/* Step a single thread. */
1569
1570static struct target_waitstatus
1571record_btrace_step_thread (struct thread_info *tp)
1572{
1573 struct btrace_insn_iterator *replay, end;
1574 struct btrace_thread_info *btinfo;
1575 struct address_space *aspace;
1576 struct inferior *inf;
1577 enum btrace_thread_flag flags;
1578 unsigned int steps;
1579
1580 btinfo = &tp->btrace;
1581 replay = btinfo->replay;
1582
1583 flags = btinfo->flags & BTHR_MOVE;
1584 btinfo->flags &= ~BTHR_MOVE;
1585
1586 DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);
1587
1588 switch (flags)
1589 {
1590 default:
1591 internal_error (__FILE__, __LINE__, _("invalid stepping type."));
1592
1593 case BTHR_STEP:
1594 /* We're done if we're not replaying. */
1595 if (replay == NULL)
1596 return btrace_step_no_history ();
1597
1598 /* We are always able to step at least once. */
1599 steps = btrace_insn_next (replay, 1);
1600 gdb_assert (steps == 1);
1601
1602 /* Determine the end of the instruction trace. */
1603 btrace_insn_end (&end, btinfo);
1604
1605 /* We stop replaying if we reached the end of the trace. */
1606 if (btrace_insn_cmp (replay, &end) == 0)
1607 record_btrace_stop_replaying (tp);
1608
1609 return btrace_step_stopped ();
1610
1611 case BTHR_RSTEP:
1612 /* Start replaying if we're not already doing so. */
1613 if (replay == NULL)
1614 replay = record_btrace_start_replaying (tp);
1615
1616 /* If we can't step any further, we reached the end of the history. */
1617 steps = btrace_insn_prev (replay, 1);
1618 if (steps == 0)
1619 return btrace_step_no_history ();
1620
1621 return btrace_step_stopped ();
1622
1623 case BTHR_CONT:
1624 /* We're done if we're not replaying. */
1625 if (replay == NULL)
1626 return btrace_step_no_history ();
1627
1628 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1629 aspace = inf->aspace;
1630
1631 /* Determine the end of the instruction trace. */
1632 btrace_insn_end (&end, btinfo);
1633
1634 for (;;)
1635 {
1636 const struct btrace_insn *insn;
1637
1638 /* We are always able to step at least once. */
1639 steps = btrace_insn_next (replay, 1);
1640 gdb_assert (steps == 1);
1641
1642 /* We stop replaying if we reached the end of the trace. */
1643 if (btrace_insn_cmp (replay, &end) == 0)
1644 {
1645 record_btrace_stop_replaying (tp);
1646 return btrace_step_no_history ();
1647 }
1648
1649 insn = btrace_insn_get (replay);
1650 gdb_assert (insn);
1651
1652 DEBUG ("stepping %d (%s) ... %s", tp->num,
1653 target_pid_to_str (tp->ptid),
1654 core_addr_to_string_nz (insn->pc));
1655
1656 if (breakpoint_here_p (aspace, insn->pc))
1657 return btrace_step_stopped ();
1658 }
1659
1660 case BTHR_RCONT:
1661 /* Start replaying if we're not already doing so. */
1662 if (replay == NULL)
1663 replay = record_btrace_start_replaying (tp);
1664
1665 inf = find_inferior_pid (ptid_get_pid (tp->ptid));
1666 aspace = inf->aspace;
1667
1668 for (;;)
1669 {
1670 const struct btrace_insn *insn;
1671
1672 /* If we can't step any further, we're done. */
1673 steps = btrace_insn_prev (replay, 1);
1674 if (steps == 0)
1675 return btrace_step_no_history ();
1676
1677 insn = btrace_insn_get (replay);
1678 gdb_assert (insn);
1679
1680 DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
1681 target_pid_to_str (tp->ptid),
1682 core_addr_to_string_nz (insn->pc));
1683
1684 if (breakpoint_here_p (aspace, insn->pc))
1685 return btrace_step_stopped ();
1686 }
1687 }
b2f4cfde
MM
1688}
1689
1690/* The to_wait method of target record-btrace. */
1691
1692static ptid_t
1693record_btrace_wait (struct target_ops *ops, ptid_t ptid,
1694 struct target_waitstatus *status, int options)
1695{
52834460
MM
1696 struct thread_info *tp, *other;
1697
1698 DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);
1699
b2f4cfde 1700 /* As long as we're not replaying, just forward the request. */
1c63c994 1701 if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
b2f4cfde
MM
1702 {
1703 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1704 if (ops->to_wait != NULL)
1705 return ops->to_wait (ops, ptid, status, options);
1706
1707 error (_("Cannot find target for waiting."));
1708 }
1709
52834460
MM
1710 /* Let's find a thread to move. */
1711 tp = record_btrace_find_thread_to_move (ptid);
1712 if (tp == NULL)
1713 {
1714 DEBUG ("wait %s: no thread", target_pid_to_str (ptid));
1715
1716 status->kind = TARGET_WAITKIND_IGNORE;
1717 return minus_one_ptid;
1718 }
1719
1720 /* We only move a single thread. We're not able to correlate threads. */
1721 *status = record_btrace_step_thread (tp);
1722
1723 /* Stop all other threads. */
1724 if (!non_stop)
1725 ALL_THREADS (other)
1726 other->btrace.flags &= ~BTHR_MOVE;
1727
1728 /* Start record histories anew from the current position. */
1729 record_btrace_clear_histories (&tp->btrace);
1730
1731 /* We moved the replay position but did not update registers. */
1732 registers_changed_ptid (tp->ptid);
1733
1734 return tp->ptid;
1735}
1736
1737/* The to_can_execute_reverse method of target record-btrace. */
1738
1739static int
19db3e69 1740record_btrace_can_execute_reverse (struct target_ops *self)
52834460
MM
1741{
1742 return 1;
1743}
1744
1745/* The to_decr_pc_after_break method of target record-btrace. */
1746
1747static CORE_ADDR
1748record_btrace_decr_pc_after_break (struct target_ops *ops,
1749 struct gdbarch *gdbarch)
1750{
1751 /* When replaying, we do not actually execute the breakpoint instruction
1752 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 1753 if (record_btrace_is_replaying (ops))
52834460
MM
1754 return 0;
1755
c0eca49f 1756 return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
1757}
1758
e2887aa3
MM
1759/* The to_find_new_threads method of target record-btrace. */
1760
1761static void
1762record_btrace_find_new_threads (struct target_ops *ops)
1763{
1764 /* Don't expect new threads if we're replaying. */
1c63c994 1765 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1766 return;
1767
1768 /* Forward the request. */
1769 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1770 if (ops->to_find_new_threads != NULL)
1771 {
1772 ops->to_find_new_threads (ops);
1773 break;
1774 }
1775}
1776
1777/* The to_thread_alive method of target record-btrace. */
1778
1779static int
1780record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1781{
1782 /* We don't add or remove threads during replay. */
1c63c994 1783 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1784 return find_thread_ptid (ptid) != NULL;
1785
1786 /* Forward the request. */
1787 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1788 if (ops->to_thread_alive != NULL)
1789 return ops->to_thread_alive (ops, ptid);
1790
1791 return 0;
1792}
1793
066ce621
MM
1794/* Set the replay branch trace instruction iterator. If IT is NULL, replay
1795 is stopped. */
1796
1797static void
1798record_btrace_set_replay (struct thread_info *tp,
1799 const struct btrace_insn_iterator *it)
1800{
1801 struct btrace_thread_info *btinfo;
1802
1803 btinfo = &tp->btrace;
1804
1805 if (it == NULL || it->function == NULL)
52834460 1806 record_btrace_stop_replaying (tp);
066ce621
MM
1807 else
1808 {
1809 if (btinfo->replay == NULL)
52834460 1810 record_btrace_start_replaying (tp);
066ce621
MM
1811 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1812 return;
1813
1814 *btinfo->replay = *it;
52834460 1815 registers_changed_ptid (tp->ptid);
066ce621
MM
1816 }
1817
52834460
MM
1818 /* Start anew from the new replay position. */
1819 record_btrace_clear_histories (btinfo);
066ce621
MM
1820}
1821
1822/* The to_goto_record_begin method of target record-btrace. */
1823
1824static void
08475817 1825record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
1826{
1827 struct thread_info *tp;
1828 struct btrace_insn_iterator begin;
1829
1830 tp = require_btrace_thread ();
1831
1832 btrace_insn_begin (&begin, &tp->btrace);
1833 record_btrace_set_replay (tp, &begin);
1834
1835 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1836}
1837
1838/* The to_goto_record_end method of target record-btrace. */
1839
1840static void
307a1b91 1841record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
1842{
1843 struct thread_info *tp;
1844
1845 tp = require_btrace_thread ();
1846
1847 record_btrace_set_replay (tp, NULL);
1848
1849 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1850}
1851
1852/* The to_goto_record method of target record-btrace. */
1853
1854static void
606183ac 1855record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
1856{
1857 struct thread_info *tp;
1858 struct btrace_insn_iterator it;
1859 unsigned int number;
1860 int found;
1861
1862 number = insn;
1863
1864 /* Check for wrap-arounds. */
1865 if (number != insn)
1866 error (_("Instruction number out of range."));
1867
1868 tp = require_btrace_thread ();
1869
1870 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1871 if (found == 0)
1872 error (_("No such instruction."));
1873
1874 record_btrace_set_replay (tp, &it);
1875
1876 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1877}
1878
afedecd3
MM
1879/* Initialize the record-btrace target ops. */
1880
1881static void
1882init_record_btrace_ops (void)
1883{
1884 struct target_ops *ops;
1885
1886 ops = &record_btrace_ops;
1887 ops->to_shortname = "record-btrace";
1888 ops->to_longname = "Branch tracing target";
1889 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1890 ops->to_open = record_btrace_open;
1891 ops->to_close = record_btrace_close;
1892 ops->to_detach = record_detach;
1893 ops->to_disconnect = record_disconnect;
1894 ops->to_mourn_inferior = record_mourn_inferior;
1895 ops->to_kill = record_kill;
1896 ops->to_create_inferior = find_default_create_inferior;
1897 ops->to_stop_recording = record_btrace_stop_recording;
1898 ops->to_info_record = record_btrace_info;
1899 ops->to_insn_history = record_btrace_insn_history;
1900 ops->to_insn_history_from = record_btrace_insn_history_from;
1901 ops->to_insn_history_range = record_btrace_insn_history_range;
1902 ops->to_call_history = record_btrace_call_history;
1903 ops->to_call_history_from = record_btrace_call_history_from;
1904 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 1905 ops->to_record_is_replaying = record_btrace_is_replaying;
633785ff
MM
1906 ops->to_xfer_partial = record_btrace_xfer_partial;
1907 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1908 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
1909 ops->to_fetch_registers = record_btrace_fetch_registers;
1910 ops->to_store_registers = record_btrace_store_registers;
1911 ops->to_prepare_to_store = record_btrace_prepare_to_store;
ac01945b
TT
1912 ops->to_get_unwinder = &record_btrace_to_get_unwinder;
1913 ops->to_get_tailcall_unwinder = &record_btrace_to_get_tailcall_unwinder;
b2f4cfde
MM
1914 ops->to_resume = record_btrace_resume;
1915 ops->to_wait = record_btrace_wait;
e2887aa3
MM
1916 ops->to_find_new_threads = record_btrace_find_new_threads;
1917 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
1918 ops->to_goto_record_begin = record_btrace_goto_begin;
1919 ops->to_goto_record_end = record_btrace_goto_end;
1920 ops->to_goto_record = record_btrace_goto;
52834460
MM
1921 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1922 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
afedecd3
MM
1923 ops->to_stratum = record_stratum;
1924 ops->to_magic = OPS_MAGIC;
1925}
1926
1927/* Alias for "target record". */
1928
1929static void
1930cmd_record_btrace_start (char *args, int from_tty)
1931{
1932 if (args != NULL && *args != 0)
1933 error (_("Invalid argument."));
1934
1935 execute_command ("target record-btrace", from_tty);
1936}
1937
1938void _initialize_record_btrace (void);
1939
1940/* Initialize btrace commands. */
1941
1942void
1943_initialize_record_btrace (void)
1944{
1945 add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
1946 _("Start branch trace recording."),
1947 &record_cmdlist);
1948 add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);
1949
1950 init_record_btrace_ops ();
1951 add_target (&record_btrace_ops);
0b722aec
MM
1952
1953 bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
1954 xcalloc, xfree);
afedecd3 1955}