]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/record-btrace.c
Add target_ops argument to to_insn_history_range
[thirdparty/binutils-gdb.git] / gdb / record-btrace.c
CommitLineData
afedecd3
MM
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "exceptions.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
afedecd3
MM
38
39/* The target_ops of record-btrace. */
40static struct target_ops record_btrace_ops;
41
42/* A new thread observer enabling branch tracing for the new thread. */
43static struct observer *record_btrace_thread_observer;
44
633785ff
MM
45/* Temporarily allow memory accesses. */
46static int record_btrace_allow_memory_access;
47
afedecd3
MM
/* Emit a record-btrace debug message if record debugging is enabled.
   The do ... while (0) wrapper makes the macro behave like a single
   statement, avoiding ambiguities in if/else chains.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[record-btrace] " msg "\n", ##args);	\
    }									\
  while (0)
59
60
61/* Update the branch trace for the current thread and return a pointer to its
066ce621 62 thread_info.
afedecd3
MM
63
64 Throws an error if there is no thread or no trace. This function never
65 returns NULL. */
66
066ce621
MM
67static struct thread_info *
68require_btrace_thread (void)
afedecd3
MM
69{
70 struct thread_info *tp;
afedecd3
MM
71
72 DEBUG ("require");
73
74 tp = find_thread_ptid (inferior_ptid);
75 if (tp == NULL)
76 error (_("No thread."));
77
78 btrace_fetch (tp);
79
6e07b1d2 80 if (btrace_is_empty (tp))
afedecd3
MM
81 error (_("No trace."));
82
066ce621
MM
83 return tp;
84}
85
86/* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92static struct btrace_thread_info *
93require_btrace (void)
94{
95 struct thread_info *tp;
96
97 tp = require_btrace_thread ();
98
99 return &tp->btrace;
afedecd3
MM
100}
101
102/* Enable branch tracing for one thread. Warn on errors. */
103
104static void
105record_btrace_enable_warn (struct thread_info *tp)
106{
107 volatile struct gdb_exception error;
108
109 TRY_CATCH (error, RETURN_MASK_ERROR)
110 btrace_enable (tp);
111
112 if (error.message != NULL)
113 warning ("%s", error.message);
114}
115
/* Cleanup callback disabling branch tracing for one thread.  ARG is the
   thread_info to disable.  */

static void
record_btrace_disable_callback (void *arg)
{
  btrace_disable ((struct thread_info *) arg);
}
127
128/* Enable automatic tracing of new threads. */
129
130static void
131record_btrace_auto_enable (void)
132{
133 DEBUG ("attach thread observer");
134
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn);
137}
138
139/* Disable automatic tracing of new threads. */
140
141static void
142record_btrace_auto_disable (void)
143{
144 /* The observer may have been detached, already. */
145 if (record_btrace_thread_observer == NULL)
146 return;
147
148 DEBUG ("detach thread observer");
149
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
152}
153
154/* The to_open method of target record-btrace. */
155
156static void
157record_btrace_open (char *args, int from_tty)
158{
159 struct cleanup *disable_chain;
160 struct thread_info *tp;
161
162 DEBUG ("open");
163
8213266a 164 record_preopen ();
afedecd3
MM
165
166 if (!target_has_execution)
167 error (_("The program is not being run."));
168
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
171
52834460
MM
172 if (non_stop)
173 error (_("Record btrace can't debug inferior in non-stop mode."));
174
afedecd3
MM
175 gdb_assert (record_btrace_thread_observer == NULL);
176
177 disable_chain = make_cleanup (null_cleanup, NULL);
178 ALL_THREADS (tp)
179 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
180 {
181 btrace_enable (tp);
182
183 make_cleanup (record_btrace_disable_callback, tp);
184 }
185
186 record_btrace_auto_enable ();
187
188 push_target (&record_btrace_ops);
189
190 observer_notify_record_changed (current_inferior (), 1);
191
192 discard_cleanups (disable_chain);
193}
194
195/* The to_stop_recording method of target record-btrace. */
196
197static void
c6cd7c02 198record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
199{
200 struct thread_info *tp;
201
202 DEBUG ("stop recording");
203
204 record_btrace_auto_disable ();
205
206 ALL_THREADS (tp)
207 if (tp->btrace.target != NULL)
208 btrace_disable (tp);
209}
210
211/* The to_close method of target record-btrace. */
212
213static void
de90e03d 214record_btrace_close (struct target_ops *self)
afedecd3 215{
568e808b
MM
216 struct thread_info *tp;
217
99c819ee
MM
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
221
568e808b
MM
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
224 ALL_THREADS (tp)
225 btrace_teardown (tp);
afedecd3
MM
226}
227
228/* The to_info_record method of target record-btrace. */
229
230static void
630d6a4a 231record_btrace_info (struct target_ops *self)
afedecd3
MM
232{
233 struct btrace_thread_info *btinfo;
234 struct thread_info *tp;
23a7fe75 235 unsigned int insns, calls;
afedecd3
MM
236
237 DEBUG ("info");
238
239 tp = find_thread_ptid (inferior_ptid);
240 if (tp == NULL)
241 error (_("No thread."));
242
243 btrace_fetch (tp);
244
23a7fe75
MM
245 insns = 0;
246 calls = 0;
247
afedecd3 248 btinfo = &tp->btrace;
6e07b1d2
MM
249
250 if (!btrace_is_empty (tp))
23a7fe75
MM
251 {
252 struct btrace_call_iterator call;
253 struct btrace_insn_iterator insn;
254
255 btrace_call_end (&call, btinfo);
256 btrace_call_prev (&call, 1);
5de9129b 257 calls = btrace_call_number (&call);
23a7fe75
MM
258
259 btrace_insn_end (&insn, btinfo);
260 btrace_insn_prev (&insn, 1);
5de9129b 261 insns = btrace_insn_number (&insn);
23a7fe75 262 }
afedecd3
MM
263
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
23a7fe75 265 "%d (%s).\n"), insns, calls, tp->num,
afedecd3 266 target_pid_to_str (tp->ptid));
07bbe694
MM
267
268 if (btrace_is_replaying (tp))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo->replay));
afedecd3
MM
271}
272
/* Emit an unsigned int field named FLD with value VAL on UIOUT.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
280
281/* Disassemble a section of the recorded instruction trace. */
282
283static void
23a7fe75
MM
284btrace_insn_history (struct ui_out *uiout,
285 const struct btrace_insn_iterator *begin,
286 const struct btrace_insn_iterator *end, int flags)
afedecd3
MM
287{
288 struct gdbarch *gdbarch;
23a7fe75 289 struct btrace_insn_iterator it;
afedecd3 290
23a7fe75
MM
291 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
292 btrace_insn_number (end));
afedecd3
MM
293
294 gdbarch = target_gdbarch ();
295
23a7fe75 296 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 297 {
23a7fe75
MM
298 const struct btrace_insn *insn;
299
300 insn = btrace_insn_get (&it);
301
afedecd3 302 /* Print the instruction index. */
23a7fe75 303 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
afedecd3
MM
304 ui_out_text (uiout, "\t");
305
306 /* Disassembly with '/m' flag may not produce the expected result.
307 See PR gdb/11833. */
23a7fe75 308 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
afedecd3
MM
309 }
310}
311
312/* The to_insn_history method of target record-btrace. */
313
314static void
7a6c5609 315record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
316{
317 struct btrace_thread_info *btinfo;
23a7fe75
MM
318 struct btrace_insn_history *history;
319 struct btrace_insn_iterator begin, end;
afedecd3
MM
320 struct cleanup *uiout_cleanup;
321 struct ui_out *uiout;
23a7fe75 322 unsigned int context, covered;
afedecd3
MM
323
324 uiout = current_uiout;
325 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
326 "insn history");
afedecd3 327 context = abs (size);
afedecd3
MM
328 if (context == 0)
329 error (_("Bad record instruction-history-size."));
330
23a7fe75
MM
331 btinfo = require_btrace ();
332 history = btinfo->insn_history;
333 if (history == NULL)
afedecd3 334 {
07bbe694 335 struct btrace_insn_iterator *replay;
afedecd3 336
23a7fe75 337 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 338
07bbe694
MM
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay = btinfo->replay;
342 if (replay != NULL)
343 begin = *replay;
344 else
345 btrace_insn_end (&begin, btinfo);
346
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
349 context. */
350 end = begin;
351 if (size < 0)
352 {
353 /* We want the current position covered, as well. */
354 covered = btrace_insn_next (&end, 1);
355 covered += btrace_insn_prev (&begin, context - covered);
356 covered += btrace_insn_next (&end, context - covered);
357 }
358 else
359 {
360 covered = btrace_insn_next (&end, context);
361 covered += btrace_insn_prev (&begin, context - covered);
362 }
afedecd3
MM
363 }
364 else
365 {
23a7fe75
MM
366 begin = history->begin;
367 end = history->end;
afedecd3 368
23a7fe75
MM
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
370 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 371
23a7fe75
MM
372 if (size < 0)
373 {
374 end = begin;
375 covered = btrace_insn_prev (&begin, context);
376 }
377 else
378 {
379 begin = end;
380 covered = btrace_insn_next (&end, context);
381 }
afedecd3
MM
382 }
383
23a7fe75
MM
384 if (covered > 0)
385 btrace_insn_history (uiout, &begin, &end, flags);
386 else
387 {
388 if (size < 0)
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
390 else
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
392 }
afedecd3 393
23a7fe75 394 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
395 do_cleanups (uiout_cleanup);
396}
397
398/* The to_insn_history_range method of target record-btrace. */
399
400static void
4e99c6b7
TT
401record_btrace_insn_history_range (struct target_ops *self,
402 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
403{
404 struct btrace_thread_info *btinfo;
23a7fe75
MM
405 struct btrace_insn_history *history;
406 struct btrace_insn_iterator begin, end;
afedecd3
MM
407 struct cleanup *uiout_cleanup;
408 struct ui_out *uiout;
23a7fe75
MM
409 unsigned int low, high;
410 int found;
afedecd3
MM
411
412 uiout = current_uiout;
413 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
414 "insn history");
23a7fe75
MM
415 low = from;
416 high = to;
afedecd3 417
23a7fe75 418 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
419
420 /* Check for wrap-arounds. */
23a7fe75 421 if (low != from || high != to)
afedecd3
MM
422 error (_("Bad range."));
423
0688d04e 424 if (high < low)
afedecd3
MM
425 error (_("Bad range."));
426
23a7fe75 427 btinfo = require_btrace ();
afedecd3 428
23a7fe75
MM
429 found = btrace_find_insn_by_number (&begin, btinfo, low);
430 if (found == 0)
431 error (_("Range out of bounds."));
afedecd3 432
23a7fe75
MM
433 found = btrace_find_insn_by_number (&end, btinfo, high);
434 if (found == 0)
0688d04e
MM
435 {
436 /* Silently truncate the range. */
437 btrace_insn_end (&end, btinfo);
438 }
439 else
440 {
441 /* We want both begin and end to be inclusive. */
442 btrace_insn_next (&end, 1);
443 }
afedecd3 444
23a7fe75
MM
445 btrace_insn_history (uiout, &begin, &end, flags);
446 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
447
448 do_cleanups (uiout_cleanup);
449}
450
451/* The to_insn_history_from method of target record-btrace. */
452
453static void
9abc3ff3
TT
454record_btrace_insn_history_from (struct target_ops *self,
455 ULONGEST from, int size, int flags)
afedecd3
MM
456{
457 ULONGEST begin, end, context;
458
459 context = abs (size);
0688d04e
MM
460 if (context == 0)
461 error (_("Bad record instruction-history-size."));
afedecd3
MM
462
463 if (size < 0)
464 {
465 end = from;
466
467 if (from < context)
468 begin = 0;
469 else
0688d04e 470 begin = from - context + 1;
afedecd3
MM
471 }
472 else
473 {
474 begin = from;
0688d04e 475 end = from + context - 1;
afedecd3
MM
476
477 /* Check for wrap-around. */
478 if (end < begin)
479 end = ULONGEST_MAX;
480 }
481
4e99c6b7 482 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
483}
484
485/* Print the instruction number range for a function call history line. */
486
487static void
23a7fe75
MM
488btrace_call_history_insn_range (struct ui_out *uiout,
489 const struct btrace_function *bfun)
afedecd3 490{
7acbe133
MM
491 unsigned int begin, end, size;
492
493 size = VEC_length (btrace_insn_s, bfun->insn);
494 gdb_assert (size > 0);
afedecd3 495
23a7fe75 496 begin = bfun->insn_offset;
7acbe133 497 end = begin + size - 1;
afedecd3 498
23a7fe75 499 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 500 ui_out_text (uiout, ",");
23a7fe75 501 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
502}
503
504/* Print the source line information for a function call history line. */
505
506static void
23a7fe75
MM
507btrace_call_history_src_line (struct ui_out *uiout,
508 const struct btrace_function *bfun)
afedecd3
MM
509{
510 struct symbol *sym;
23a7fe75 511 int begin, end;
afedecd3
MM
512
513 sym = bfun->sym;
514 if (sym == NULL)
515 return;
516
517 ui_out_field_string (uiout, "file",
518 symtab_to_filename_for_display (sym->symtab));
519
23a7fe75
MM
520 begin = bfun->lbegin;
521 end = bfun->lend;
522
523 if (end < begin)
afedecd3
MM
524 return;
525
526 ui_out_text (uiout, ":");
23a7fe75 527 ui_out_field_int (uiout, "min line", begin);
afedecd3 528
23a7fe75 529 if (end == begin)
afedecd3
MM
530 return;
531
8710b709 532 ui_out_text (uiout, ",");
23a7fe75 533 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
534}
535
0b722aec
MM
536/* Get the name of a branch trace function. */
537
538static const char *
539btrace_get_bfun_name (const struct btrace_function *bfun)
540{
541 struct minimal_symbol *msym;
542 struct symbol *sym;
543
544 if (bfun == NULL)
545 return "??";
546
547 msym = bfun->msym;
548 sym = bfun->sym;
549
550 if (sym != NULL)
551 return SYMBOL_PRINT_NAME (sym);
552 else if (msym != NULL)
553 return SYMBOL_PRINT_NAME (msym);
554 else
555 return "??";
556}
557
afedecd3
MM
558/* Disassemble a section of the recorded function trace. */
559
560static void
23a7fe75 561btrace_call_history (struct ui_out *uiout,
8710b709 562 const struct btrace_thread_info *btinfo,
23a7fe75
MM
563 const struct btrace_call_iterator *begin,
564 const struct btrace_call_iterator *end,
afedecd3
MM
565 enum record_print_flag flags)
566{
23a7fe75 567 struct btrace_call_iterator it;
afedecd3 568
23a7fe75
MM
569 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
570 btrace_call_number (end));
afedecd3 571
23a7fe75 572 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 573 {
23a7fe75
MM
574 const struct btrace_function *bfun;
575 struct minimal_symbol *msym;
576 struct symbol *sym;
577
578 bfun = btrace_call_get (&it);
23a7fe75 579 sym = bfun->sym;
0b722aec 580 msym = bfun->msym;
23a7fe75 581
afedecd3 582 /* Print the function index. */
23a7fe75 583 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
584 ui_out_text (uiout, "\t");
585
8710b709
MM
586 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
587 {
588 int level = bfun->level + btinfo->level, i;
589
590 for (i = 0; i < level; ++i)
591 ui_out_text (uiout, " ");
592 }
593
594 if (sym != NULL)
595 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
596 else if (msym != NULL)
597 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
598 else if (!ui_out_is_mi_like_p (uiout))
599 ui_out_field_string (uiout, "function", "??");
600
1e038f67 601 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 602 {
8710b709 603 ui_out_text (uiout, _("\tinst "));
23a7fe75 604 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
605 }
606
1e038f67 607 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 608 {
8710b709 609 ui_out_text (uiout, _("\tat "));
23a7fe75 610 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
611 }
612
afedecd3
MM
613 ui_out_text (uiout, "\n");
614 }
615}
616
617/* The to_call_history method of target record-btrace. */
618
619static void
620record_btrace_call_history (int size, int flags)
621{
622 struct btrace_thread_info *btinfo;
23a7fe75
MM
623 struct btrace_call_history *history;
624 struct btrace_call_iterator begin, end;
afedecd3
MM
625 struct cleanup *uiout_cleanup;
626 struct ui_out *uiout;
23a7fe75 627 unsigned int context, covered;
afedecd3
MM
628
629 uiout = current_uiout;
630 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
631 "insn history");
afedecd3 632 context = abs (size);
afedecd3
MM
633 if (context == 0)
634 error (_("Bad record function-call-history-size."));
635
23a7fe75
MM
636 btinfo = require_btrace ();
637 history = btinfo->call_history;
638 if (history == NULL)
afedecd3 639 {
07bbe694 640 struct btrace_insn_iterator *replay;
afedecd3 641
23a7fe75 642 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 643
07bbe694
MM
644 /* If we're replaying, we start at the replay position. Otherwise, we
645 start at the tail of the trace. */
646 replay = btinfo->replay;
647 if (replay != NULL)
648 {
649 begin.function = replay->function;
650 begin.btinfo = btinfo;
651 }
652 else
653 btrace_call_end (&begin, btinfo);
654
655 /* We start from here and expand in the requested direction. Then we
656 expand in the other direction, as well, to fill up any remaining
657 context. */
658 end = begin;
659 if (size < 0)
660 {
661 /* We want the current position covered, as well. */
662 covered = btrace_call_next (&end, 1);
663 covered += btrace_call_prev (&begin, context - covered);
664 covered += btrace_call_next (&end, context - covered);
665 }
666 else
667 {
668 covered = btrace_call_next (&end, context);
669 covered += btrace_call_prev (&begin, context- covered);
670 }
afedecd3
MM
671 }
672 else
673 {
23a7fe75
MM
674 begin = history->begin;
675 end = history->end;
afedecd3 676
23a7fe75
MM
677 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
678 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 679
23a7fe75
MM
680 if (size < 0)
681 {
682 end = begin;
683 covered = btrace_call_prev (&begin, context);
684 }
685 else
686 {
687 begin = end;
688 covered = btrace_call_next (&end, context);
689 }
afedecd3
MM
690 }
691
23a7fe75 692 if (covered > 0)
8710b709 693 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
694 else
695 {
696 if (size < 0)
697 printf_unfiltered (_("At the start of the branch trace record.\n"));
698 else
699 printf_unfiltered (_("At the end of the branch trace record.\n"));
700 }
afedecd3 701
23a7fe75 702 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
703 do_cleanups (uiout_cleanup);
704}
705
706/* The to_call_history_range method of target record-btrace. */
707
708static void
709record_btrace_call_history_range (ULONGEST from, ULONGEST to, int flags)
710{
711 struct btrace_thread_info *btinfo;
23a7fe75
MM
712 struct btrace_call_history *history;
713 struct btrace_call_iterator begin, end;
afedecd3
MM
714 struct cleanup *uiout_cleanup;
715 struct ui_out *uiout;
23a7fe75
MM
716 unsigned int low, high;
717 int found;
afedecd3
MM
718
719 uiout = current_uiout;
720 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
721 "func history");
23a7fe75
MM
722 low = from;
723 high = to;
afedecd3 724
23a7fe75 725 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
726
727 /* Check for wrap-arounds. */
23a7fe75 728 if (low != from || high != to)
afedecd3
MM
729 error (_("Bad range."));
730
0688d04e 731 if (high < low)
afedecd3
MM
732 error (_("Bad range."));
733
23a7fe75 734 btinfo = require_btrace ();
afedecd3 735
23a7fe75
MM
736 found = btrace_find_call_by_number (&begin, btinfo, low);
737 if (found == 0)
738 error (_("Range out of bounds."));
afedecd3 739
23a7fe75
MM
740 found = btrace_find_call_by_number (&end, btinfo, high);
741 if (found == 0)
0688d04e
MM
742 {
743 /* Silently truncate the range. */
744 btrace_call_end (&end, btinfo);
745 }
746 else
747 {
748 /* We want both begin and end to be inclusive. */
749 btrace_call_next (&end, 1);
750 }
afedecd3 751
8710b709 752 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 753 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
754
755 do_cleanups (uiout_cleanup);
756}
757
758/* The to_call_history_from method of target record-btrace. */
759
760static void
761record_btrace_call_history_from (ULONGEST from, int size, int flags)
762{
763 ULONGEST begin, end, context;
764
765 context = abs (size);
0688d04e
MM
766 if (context == 0)
767 error (_("Bad record function-call-history-size."));
afedecd3
MM
768
769 if (size < 0)
770 {
771 end = from;
772
773 if (from < context)
774 begin = 0;
775 else
0688d04e 776 begin = from - context + 1;
afedecd3
MM
777 }
778 else
779 {
780 begin = from;
0688d04e 781 end = from + context - 1;
afedecd3
MM
782
783 /* Check for wrap-around. */
784 if (end < begin)
785 end = ULONGEST_MAX;
786 }
787
788 record_btrace_call_history_range (begin, end, flags);
789}
790
07bbe694
MM
791/* The to_record_is_replaying method of target record-btrace. */
792
793static int
1c63c994 794record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
795{
796 struct thread_info *tp;
797
798 ALL_THREADS (tp)
799 if (btrace_is_replaying (tp))
800 return 1;
801
802 return 0;
803}
804
633785ff
MM
805/* The to_xfer_partial method of target record-btrace. */
806
9b409511 807static enum target_xfer_status
633785ff
MM
808record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
809 const char *annex, gdb_byte *readbuf,
810 const gdb_byte *writebuf, ULONGEST offset,
9b409511 811 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
812{
813 struct target_ops *t;
814
815 /* Filter out requests that don't make sense during replay. */
1c63c994 816 if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
633785ff
MM
817 {
818 switch (object)
819 {
820 case TARGET_OBJECT_MEMORY:
821 {
822 struct target_section *section;
823
824 /* We do not allow writing memory in general. */
825 if (writebuf != NULL)
9b409511
YQ
826 {
827 *xfered_len = len;
828 return TARGET_XFER_E_UNAVAILABLE;
829 }
633785ff
MM
830
831 /* We allow reading readonly memory. */
832 section = target_section_by_addr (ops, offset);
833 if (section != NULL)
834 {
835 /* Check if the section we found is readonly. */
836 if ((bfd_get_section_flags (section->the_bfd_section->owner,
837 section->the_bfd_section)
838 & SEC_READONLY) != 0)
839 {
840 /* Truncate the request to fit into this section. */
841 len = min (len, section->endaddr - offset);
842 break;
843 }
844 }
845
9b409511 846 *xfered_len = len;
633785ff
MM
847 return TARGET_XFER_E_UNAVAILABLE;
848 }
849 }
850 }
851
852 /* Forward the request. */
853 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
854 if (ops->to_xfer_partial != NULL)
855 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 856 offset, len, xfered_len);
633785ff 857
9b409511 858 *xfered_len = len;
633785ff
MM
859 return TARGET_XFER_E_UNAVAILABLE;
860}
861
862/* The to_insert_breakpoint method of target record-btrace. */
863
864static int
865record_btrace_insert_breakpoint (struct target_ops *ops,
866 struct gdbarch *gdbarch,
867 struct bp_target_info *bp_tgt)
868{
869 volatile struct gdb_exception except;
870 int old, ret;
871
872 /* Inserting breakpoints requires accessing memory. Allow it for the
873 duration of this function. */
874 old = record_btrace_allow_memory_access;
875 record_btrace_allow_memory_access = 1;
876
877 ret = 0;
878 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 879 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff
MM
880
881 record_btrace_allow_memory_access = old;
882
883 if (except.reason < 0)
884 throw_exception (except);
885
886 return ret;
887}
888
889/* The to_remove_breakpoint method of target record-btrace. */
890
891static int
892record_btrace_remove_breakpoint (struct target_ops *ops,
893 struct gdbarch *gdbarch,
894 struct bp_target_info *bp_tgt)
895{
896 volatile struct gdb_exception except;
897 int old, ret;
898
899 /* Removing breakpoints requires accessing memory. Allow it for the
900 duration of this function. */
901 old = record_btrace_allow_memory_access;
902 record_btrace_allow_memory_access = 1;
903
904 ret = 0;
905 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 906 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff
MM
907
908 record_btrace_allow_memory_access = old;
909
910 if (except.reason < 0)
911 throw_exception (except);
912
913 return ret;
914}
915
1f3ef581
MM
916/* The to_fetch_registers method of target record-btrace. */
917
918static void
919record_btrace_fetch_registers (struct target_ops *ops,
920 struct regcache *regcache, int regno)
921{
922 struct btrace_insn_iterator *replay;
923 struct thread_info *tp;
924
925 tp = find_thread_ptid (inferior_ptid);
926 gdb_assert (tp != NULL);
927
928 replay = tp->btrace.replay;
929 if (replay != NULL)
930 {
931 const struct btrace_insn *insn;
932 struct gdbarch *gdbarch;
933 int pcreg;
934
935 gdbarch = get_regcache_arch (regcache);
936 pcreg = gdbarch_pc_regnum (gdbarch);
937 if (pcreg < 0)
938 return;
939
940 /* We can only provide the PC register. */
941 if (regno >= 0 && regno != pcreg)
942 return;
943
944 insn = btrace_insn_get (replay);
945 gdb_assert (insn != NULL);
946
947 regcache_raw_supply (regcache, regno, &insn->pc);
948 }
949 else
950 {
951 struct target_ops *t;
952
953 for (t = ops->beneath; t != NULL; t = t->beneath)
954 if (t->to_fetch_registers != NULL)
955 {
956 t->to_fetch_registers (t, regcache, regno);
957 break;
958 }
959 }
960}
961
962/* The to_store_registers method of target record-btrace. */
963
964static void
965record_btrace_store_registers (struct target_ops *ops,
966 struct regcache *regcache, int regno)
967{
968 struct target_ops *t;
969
1c63c994 970 if (record_btrace_is_replaying (ops))
1f3ef581
MM
971 error (_("This record target does not allow writing registers."));
972
973 gdb_assert (may_write_registers != 0);
974
975 for (t = ops->beneath; t != NULL; t = t->beneath)
976 if (t->to_store_registers != NULL)
977 {
978 t->to_store_registers (t, regcache, regno);
979 return;
980 }
981
982 noprocess ();
983}
984
985/* The to_prepare_to_store method of target record-btrace. */
986
987static void
988record_btrace_prepare_to_store (struct target_ops *ops,
989 struct regcache *regcache)
990{
991 struct target_ops *t;
992
1c63c994 993 if (record_btrace_is_replaying (ops))
1f3ef581
MM
994 return;
995
996 for (t = ops->beneath; t != NULL; t = t->beneath)
997 if (t->to_prepare_to_store != NULL)
998 {
999 t->to_prepare_to_store (t, regcache);
1000 return;
1001 }
1002}
1003
0b722aec
MM
/* Per-frame cache for the branch trace frame unwinder.  */

struct btrace_frame_cache
{
  /* The thread this frame belongs to.  */
  struct thread_info *tp;

  /* The frame info; used as the hash table key.  */
  struct frame_info *frame;

  /* The branch trace function segment backing this frame.  */
  const struct btrace_function *bfun;
};
1017
1018/* A struct btrace_frame_cache hash table indexed by NEXT. */
1019
1020static htab_t bfcache;
1021
1022/* hash_f for htab_create_alloc of bfcache. */
1023
1024static hashval_t
1025bfcache_hash (const void *arg)
1026{
1027 const struct btrace_frame_cache *cache = arg;
1028
1029 return htab_hash_pointer (cache->frame);
1030}
1031
1032/* eq_f for htab_create_alloc of bfcache. */
1033
1034static int
1035bfcache_eq (const void *arg1, const void *arg2)
1036{
1037 const struct btrace_frame_cache *cache1 = arg1;
1038 const struct btrace_frame_cache *cache2 = arg2;
1039
1040 return cache1->frame == cache2->frame;
1041}
1042
1043/* Create a new btrace frame cache. */
1044
1045static struct btrace_frame_cache *
1046bfcache_new (struct frame_info *frame)
1047{
1048 struct btrace_frame_cache *cache;
1049 void **slot;
1050
1051 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1052 cache->frame = frame;
1053
1054 slot = htab_find_slot (bfcache, cache, INSERT);
1055 gdb_assert (*slot == NULL);
1056 *slot = cache;
1057
1058 return cache;
1059}
1060
1061/* Extract the branch trace function from a branch trace frame. */
1062
1063static const struct btrace_function *
1064btrace_get_frame_function (struct frame_info *frame)
1065{
1066 const struct btrace_frame_cache *cache;
1067 const struct btrace_function *bfun;
1068 struct btrace_frame_cache pattern;
1069 void **slot;
1070
1071 pattern.frame = frame;
1072
1073 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1074 if (slot == NULL)
1075 return NULL;
1076
1077 cache = *slot;
1078 return cache->bfun;
1079}
1080
cecac1ab
MM
1081/* Implement stop_reason method for record_btrace_frame_unwind. */
1082
1083static enum unwind_stop_reason
1084record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1085 void **this_cache)
1086{
0b722aec
MM
1087 const struct btrace_frame_cache *cache;
1088 const struct btrace_function *bfun;
1089
1090 cache = *this_cache;
1091 bfun = cache->bfun;
1092 gdb_assert (bfun != NULL);
1093
1094 if (bfun->up == NULL)
1095 return UNWIND_UNAVAILABLE;
1096
1097 return UNWIND_NO_REASON;
cecac1ab
MM
1098}
1099
1100/* Implement this_id method for record_btrace_frame_unwind. */
1101
1102static void
1103record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1104 struct frame_id *this_id)
1105{
0b722aec
MM
1106 const struct btrace_frame_cache *cache;
1107 const struct btrace_function *bfun;
1108 CORE_ADDR code, special;
1109
1110 cache = *this_cache;
1111
1112 bfun = cache->bfun;
1113 gdb_assert (bfun != NULL);
1114
1115 while (bfun->segment.prev != NULL)
1116 bfun = bfun->segment.prev;
1117
1118 code = get_frame_func (this_frame);
1119 special = bfun->number;
1120
1121 *this_id = frame_id_build_unavailable_stack_special (code, special);
1122
1123 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1124 btrace_get_bfun_name (cache->bfun),
1125 core_addr_to_string_nz (this_id->code_addr),
1126 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1127}
1128
1129/* Implement prev_register method for record_btrace_frame_unwind. */
1130
1131static struct value *
1132record_btrace_frame_prev_register (struct frame_info *this_frame,
1133 void **this_cache,
1134 int regnum)
1135{
0b722aec
MM
1136 const struct btrace_frame_cache *cache;
1137 const struct btrace_function *bfun, *caller;
1138 const struct btrace_insn *insn;
1139 struct gdbarch *gdbarch;
1140 CORE_ADDR pc;
1141 int pcreg;
1142
1143 gdbarch = get_frame_arch (this_frame);
1144 pcreg = gdbarch_pc_regnum (gdbarch);
1145 if (pcreg < 0 || regnum != pcreg)
1146 throw_error (NOT_AVAILABLE_ERROR,
1147 _("Registers are not available in btrace record history"));
1148
1149 cache = *this_cache;
1150 bfun = cache->bfun;
1151 gdb_assert (bfun != NULL);
1152
1153 caller = bfun->up;
1154 if (caller == NULL)
1155 throw_error (NOT_AVAILABLE_ERROR,
1156 _("No caller in btrace record history"));
1157
1158 if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
1159 {
1160 insn = VEC_index (btrace_insn_s, caller->insn, 0);
1161 pc = insn->pc;
1162 }
1163 else
1164 {
1165 insn = VEC_last (btrace_insn_s, caller->insn);
1166 pc = insn->pc;
1167
1168 pc += gdb_insn_length (gdbarch, pc);
1169 }
1170
1171 DEBUG ("[frame] unwound PC in %s on level %d: %s",
1172 btrace_get_bfun_name (bfun), bfun->level,
1173 core_addr_to_string_nz (pc));
1174
1175 return frame_unwind_got_address (this_frame, regnum, pc);
cecac1ab
MM
1176}
1177
1178/* Implement sniffer method for record_btrace_frame_unwind. */
1179
1180static int
1181record_btrace_frame_sniffer (const struct frame_unwind *self,
1182 struct frame_info *this_frame,
1183 void **this_cache)
1184{
0b722aec
MM
1185 const struct btrace_function *bfun;
1186 struct btrace_frame_cache *cache;
cecac1ab 1187 struct thread_info *tp;
0b722aec 1188 struct frame_info *next;
cecac1ab
MM
1189
1190 /* THIS_FRAME does not contain a reference to its thread. */
1191 tp = find_thread_ptid (inferior_ptid);
1192 gdb_assert (tp != NULL);
1193
0b722aec
MM
1194 bfun = NULL;
1195 next = get_next_frame (this_frame);
1196 if (next == NULL)
1197 {
1198 const struct btrace_insn_iterator *replay;
1199
1200 replay = tp->btrace.replay;
1201 if (replay != NULL)
1202 bfun = replay->function;
1203 }
1204 else
1205 {
1206 const struct btrace_function *callee;
1207
1208 callee = btrace_get_frame_function (next);
1209 if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1210 bfun = callee->up;
1211 }
1212
1213 if (bfun == NULL)
1214 return 0;
1215
1216 DEBUG ("[frame] sniffed frame for %s on level %d",
1217 btrace_get_bfun_name (bfun), bfun->level);
1218
1219 /* This is our frame. Initialize the frame cache. */
1220 cache = bfcache_new (this_frame);
1221 cache->tp = tp;
1222 cache->bfun = bfun;
1223
1224 *this_cache = cache;
1225 return 1;
1226}
1227
1228/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1229
1230static int
1231record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1232 struct frame_info *this_frame,
1233 void **this_cache)
1234{
1235 const struct btrace_function *bfun, *callee;
1236 struct btrace_frame_cache *cache;
1237 struct frame_info *next;
1238
1239 next = get_next_frame (this_frame);
1240 if (next == NULL)
1241 return 0;
1242
1243 callee = btrace_get_frame_function (next);
1244 if (callee == NULL)
1245 return 0;
1246
1247 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1248 return 0;
1249
1250 bfun = callee->up;
1251 if (bfun == NULL)
1252 return 0;
1253
1254 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1255 btrace_get_bfun_name (bfun), bfun->level);
1256
1257 /* This is our frame. Initialize the frame cache. */
1258 cache = bfcache_new (this_frame);
1259 cache->tp = find_thread_ptid (inferior_ptid);
1260 cache->bfun = bfun;
1261
1262 *this_cache = cache;
1263 return 1;
1264}
1265
1266static void
1267record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1268{
1269 struct btrace_frame_cache *cache;
1270 void **slot;
1271
1272 cache = this_cache;
1273
1274 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1275 gdb_assert (slot != NULL);
1276
1277 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1278}
1279
/* btrace recording does not store previous memory content, nor the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* no extra unwind data */
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
1296
/* The tail call variant of the btrace frame unwinder.  It differs from
   record_btrace_frame_unwind only in the frame type and the sniffer.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,				/* no extra unwind data */
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1307
52834460
MM
1308/* Indicate that TP should be resumed according to FLAG. */
1309
1310static void
1311record_btrace_resume_thread (struct thread_info *tp,
1312 enum btrace_thread_flag flag)
1313{
1314 struct btrace_thread_info *btinfo;
1315
1316 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1317
1318 btinfo = &tp->btrace;
1319
1320 if ((btinfo->flags & BTHR_MOVE) != 0)
1321 error (_("Thread already moving."));
1322
1323 /* Fetch the latest branch trace. */
1324 btrace_fetch (tp);
1325
1326 btinfo->flags |= flag;
1327}
1328
1329/* Find the thread to resume given a PTID. */
1330
1331static struct thread_info *
1332record_btrace_find_resume_thread (ptid_t ptid)
1333{
1334 struct thread_info *tp;
1335
1336 /* When asked to resume everything, we pick the current thread. */
1337 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1338 ptid = inferior_ptid;
1339
1340 return find_thread_ptid (ptid);
1341}
1342
/* Start replaying a thread.  Returns the new replay iterator (also stored
   in TP's btrace.replay) or NULL if TP has no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* If anything above failed, undo our changes and re-throw.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1429
1430/* Stop replaying a thread. */
1431
1432static void
1433record_btrace_stop_replaying (struct thread_info *tp)
1434{
1435 struct btrace_thread_info *btinfo;
1436
1437 btinfo = &tp->btrace;
1438
1439 xfree (btinfo->replay);
1440 btinfo->replay = NULL;
1441
1442 /* Make sure we're not leaving any stale registers. */
1443 registers_changed_ptid (tp->ptid);
1444}
1445
b2f4cfde
MM
/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request to the first
     target beneath us that implements to_resume.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_resume != NULL)
	  return ops->to_resume (ops, ptid, step, signal);

      error (_("Cannot find target for stepping."));
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */
}
1492
1493/* Find a thread to move. */
1494
1495static struct thread_info *
1496record_btrace_find_thread_to_move (ptid_t ptid)
1497{
1498 struct thread_info *tp;
1499
1500 /* First check the parameter thread. */
1501 tp = find_thread_ptid (ptid);
1502 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1503 return tp;
1504
1505 /* Otherwise, find one other thread that has been resumed. */
1506 ALL_THREADS (tp)
1507 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1508 return tp;
1509
1510 return NULL;
1511}
1512
1513/* Return a target_waitstatus indicating that we ran out of history. */
1514
1515static struct target_waitstatus
1516btrace_step_no_history (void)
1517{
1518 struct target_waitstatus status;
1519
1520 status.kind = TARGET_WAITKIND_NO_HISTORY;
1521
1522 return status;
1523}
1524
1525/* Return a target_waitstatus indicating that a step finished. */
1526
1527static struct target_waitstatus
1528btrace_step_stopped (void)
1529{
1530 struct target_waitstatus status;
1531
1532 status.kind = TARGET_WAITKIND_STOPPED;
1533 status.value.sig = GDB_SIGNAL_TRAP;
1534
1535 return status;
1536}
1537
1538/* Clear the record histories. */
1539
1540static void
1541record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1542{
1543 xfree (btinfo->insn_history);
1544 xfree (btinfo->call_history);
1545
1546 btinfo->insn_history = NULL;
1547 btinfo->call_history = NULL;
1548}
1549
/* Step a single thread according to its pending BTHR_MOVE request and
   return the resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Consume the move request; it applies to this step only.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  /* Stop when we hit a breakpoint.  */
	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  /* Stop when we hit a breakpoint.  */
	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1671
/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request to the first
     target beneath us that implements to_wait.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_wait != NULL)
	  return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      /* No resumed thread: nothing to report.  */
      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads. */
  if (!non_stop)
    ALL_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1718
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* The branch trace record target always supports reverse execution.  */
  return 1;
}
1726
1727/* The to_decr_pc_after_break method of target record-btrace. */
1728
1729static CORE_ADDR
1730record_btrace_decr_pc_after_break (struct target_ops *ops,
1731 struct gdbarch *gdbarch)
1732{
1733 /* When replaying, we do not actually execute the breakpoint instruction
1734 so there is no need to adjust the PC after hitting a breakpoint. */
1c63c994 1735 if (record_btrace_is_replaying (ops))
52834460
MM
1736 return 0;
1737
1738 return forward_target_decr_pc_after_break (ops->beneath, gdbarch);
b2f4cfde
MM
1739}
1740
e2887aa3
MM
1741/* The to_find_new_threads method of target record-btrace. */
1742
1743static void
1744record_btrace_find_new_threads (struct target_ops *ops)
1745{
1746 /* Don't expect new threads if we're replaying. */
1c63c994 1747 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1748 return;
1749
1750 /* Forward the request. */
1751 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1752 if (ops->to_find_new_threads != NULL)
1753 {
1754 ops->to_find_new_threads (ops);
1755 break;
1756 }
1757}
1758
1759/* The to_thread_alive method of target record-btrace. */
1760
1761static int
1762record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1763{
1764 /* We don't add or remove threads during replay. */
1c63c994 1765 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1766 return find_thread_ptid (ptid) != NULL;
1767
1768 /* Forward the request. */
1769 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1770 if (ops->to_thread_alive != NULL)
1771 return ops->to_thread_alive (ops, ptid);
1772
1773 return 0;
1774}
1775
066ce621
MM
1776/* Set the replay branch trace instruction iterator. If IT is NULL, replay
1777 is stopped. */
1778
1779static void
1780record_btrace_set_replay (struct thread_info *tp,
1781 const struct btrace_insn_iterator *it)
1782{
1783 struct btrace_thread_info *btinfo;
1784
1785 btinfo = &tp->btrace;
1786
1787 if (it == NULL || it->function == NULL)
52834460 1788 record_btrace_stop_replaying (tp);
066ce621
MM
1789 else
1790 {
1791 if (btinfo->replay == NULL)
52834460 1792 record_btrace_start_replaying (tp);
066ce621
MM
1793 else if (btrace_insn_cmp (btinfo->replay, it) == 0)
1794 return;
1795
1796 *btinfo->replay = *it;
52834460 1797 registers_changed_ptid (tp->ptid);
066ce621
MM
1798 }
1799
52834460
MM
1800 /* Start anew from the new replay position. */
1801 record_btrace_clear_histories (btinfo);
066ce621
MM
1802}
1803
1804/* The to_goto_record_begin method of target record-btrace. */
1805
1806static void
08475817 1807record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
1808{
1809 struct thread_info *tp;
1810 struct btrace_insn_iterator begin;
1811
1812 tp = require_btrace_thread ();
1813
1814 btrace_insn_begin (&begin, &tp->btrace);
1815 record_btrace_set_replay (tp, &begin);
1816
1817 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1818}
1819
1820/* The to_goto_record_end method of target record-btrace. */
1821
1822static void
307a1b91 1823record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
1824{
1825 struct thread_info *tp;
1826
1827 tp = require_btrace_thread ();
1828
1829 record_btrace_set_replay (tp, NULL);
1830
1831 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1832}
1833
1834/* The to_goto_record method of target record-btrace. */
1835
1836static void
606183ac 1837record_btrace_goto (struct target_ops *self, ULONGEST insn)
066ce621
MM
1838{
1839 struct thread_info *tp;
1840 struct btrace_insn_iterator it;
1841 unsigned int number;
1842 int found;
1843
1844 number = insn;
1845
1846 /* Check for wrap-arounds. */
1847 if (number != insn)
1848 error (_("Instruction number out of range."));
1849
1850 tp = require_btrace_thread ();
1851
1852 found = btrace_find_insn_by_number (&it, &tp->btrace, number);
1853 if (found == 0)
1854 error (_("No such instruction."));
1855
1856 record_btrace_set_replay (tp, &it);
1857
1858 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1859}
1860
afedecd3
MM
/* Initialize the record-btrace target ops.  */

static void
init_record_btrace_ops (void)
{
  struct target_ops *ops;

  ops = &record_btrace_ops;
  /* Identification.  */
  ops->to_shortname = "record-btrace";
  ops->to_longname = "Branch tracing target";
  ops->to_doc = "Collect control-flow trace and provide the execution history.";
  /* Lifecycle.  */
  ops->to_open = record_btrace_open;
  ops->to_close = record_btrace_close;
  ops->to_detach = record_detach;
  ops->to_disconnect = record_disconnect;
  ops->to_mourn_inferior = record_mourn_inferior;
  ops->to_kill = record_kill;
  ops->to_create_inferior = find_default_create_inferior;
  /* Record history browsing.  */
  ops->to_stop_recording = record_btrace_stop_recording;
  ops->to_info_record = record_btrace_info;
  ops->to_insn_history = record_btrace_insn_history;
  ops->to_insn_history_from = record_btrace_insn_history_from;
  ops->to_insn_history_range = record_btrace_insn_history_range;
  ops->to_call_history = record_btrace_call_history;
  ops->to_call_history_from = record_btrace_call_history_from;
  ops->to_call_history_range = record_btrace_call_history_range;
  ops->to_record_is_replaying = record_btrace_is_replaying;
  /* Memory and breakpoints during replay.  */
  ops->to_xfer_partial = record_btrace_xfer_partial;
  ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
  ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
  /* Registers and unwinding.  */
  ops->to_fetch_registers = record_btrace_fetch_registers;
  ops->to_store_registers = record_btrace_store_registers;
  ops->to_prepare_to_store = record_btrace_prepare_to_store;
  ops->to_get_unwinder = &record_btrace_frame_unwind;
  ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
  /* Execution control.  */
  ops->to_resume = record_btrace_resume;
  ops->to_wait = record_btrace_wait;
  ops->to_find_new_threads = record_btrace_find_new_threads;
  ops->to_thread_alive = record_btrace_thread_alive;
  ops->to_goto_record_begin = record_btrace_goto_begin;
  ops->to_goto_record_end = record_btrace_goto_end;
  ops->to_goto_record = record_btrace_goto;
  ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
  ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
  ops->to_stratum = record_stratum;
  ops->to_magic = OPS_MAGIC;
}
1908
1909/* Alias for "target record". */
1910
1911static void
1912cmd_record_btrace_start (char *args, int from_tty)
1913{
1914 if (args != NULL && *args != 0)
1915 error (_("Invalid argument."));
1916
1917 execute_command ("target record-btrace", from_tty);
1918}
1919
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  /* "record btrace" starts branch trace recording; "record b" is an
     alias.  */
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* The frame cache maps frames to btrace functions for the unwinders.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}