/* gdb/record-btrace.c (binutils-gdb) -- text recovered from a git-blame
   web view; interleaved blame annotations (commit hashes, author
   initials, per-line numbers) are scrape artifacts, not code.  */
1/* Branch trace support for GDB, the GNU debugger.
2
ecd75fc8 3 Copyright (C) 2013-2014 Free Software Foundation, Inc.
afedecd3
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
22#include "defs.h"
23#include "record.h"
24#include "gdbthread.h"
25#include "target.h"
26#include "gdbcmd.h"
27#include "disasm.h"
28#include "observer.h"
29#include "exceptions.h"
30#include "cli/cli-utils.h"
31#include "source.h"
32#include "ui-out.h"
33#include "symtab.h"
34#include "filenames.h"
1f3ef581 35#include "regcache.h"
cecac1ab 36#include "frame-unwind.h"
0b722aec 37#include "hashtab.h"
afedecd3
MM
38
39/* The target_ops of record-btrace. */
40static struct target_ops record_btrace_ops;
41
42/* A new thread observer enabling branch tracing for the new thread. */
43static struct observer *record_btrace_thread_observer;
44
633785ff
MM
45/* Temporarily allow memory accesses. */
46static int record_btrace_allow_memory_access;
47
afedecd3
MM
/* Print a record-btrace debug message.  The do ... while (0) wrapper makes
   the macro expand to a single statement, avoiding dangling-else
   ambiguities when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
	fprintf_unfiltered (gdb_stdlog, \
			    "[record-btrace] " msg "\n", ##args); \
    } \
  while (0)
59
60
61/* Update the branch trace for the current thread and return a pointer to its
066ce621 62 thread_info.
afedecd3
MM
63
64 Throws an error if there is no thread or no trace. This function never
65 returns NULL. */
66
066ce621
MM
67static struct thread_info *
68require_btrace_thread (void)
afedecd3
MM
69{
70 struct thread_info *tp;
afedecd3
MM
71
72 DEBUG ("require");
73
74 tp = find_thread_ptid (inferior_ptid);
75 if (tp == NULL)
76 error (_("No thread."));
77
78 btrace_fetch (tp);
79
6e07b1d2 80 if (btrace_is_empty (tp))
afedecd3
MM
81 error (_("No trace."));
82
066ce621
MM
83 return tp;
84}
85
86/* Update the branch trace for the current thread and return a pointer to its
87 branch trace information struct.
88
89 Throws an error if there is no thread or no trace. This function never
90 returns NULL. */
91
92static struct btrace_thread_info *
93require_btrace (void)
94{
95 struct thread_info *tp;
96
97 tp = require_btrace_thread ();
98
99 return &tp->btrace;
afedecd3
MM
100}
101
102/* Enable branch tracing for one thread. Warn on errors. */
103
104static void
105record_btrace_enable_warn (struct thread_info *tp)
106{
107 volatile struct gdb_exception error;
108
109 TRY_CATCH (error, RETURN_MASK_ERROR)
110 btrace_enable (tp);
111
112 if (error.message != NULL)
113 warning ("%s", error.message);
114}
115
/* Cleanup callback to disable branch tracing for one thread.  ARG is the
   thread_info of the thread to stop tracing.  */

static void
record_btrace_disable_callback (void *arg)
{
  struct thread_info *tp;

  tp = arg;

  btrace_disable (tp);
}
127
128/* Enable automatic tracing of new threads. */
129
130static void
131record_btrace_auto_enable (void)
132{
133 DEBUG ("attach thread observer");
134
135 record_btrace_thread_observer
136 = observer_attach_new_thread (record_btrace_enable_warn);
137}
138
139/* Disable automatic tracing of new threads. */
140
141static void
142record_btrace_auto_disable (void)
143{
144 /* The observer may have been detached, already. */
145 if (record_btrace_thread_observer == NULL)
146 return;
147
148 DEBUG ("detach thread observer");
149
150 observer_detach_new_thread (record_btrace_thread_observer);
151 record_btrace_thread_observer = NULL;
152}
153
154/* The to_open method of target record-btrace. */
155
156static void
157record_btrace_open (char *args, int from_tty)
158{
159 struct cleanup *disable_chain;
160 struct thread_info *tp;
161
162 DEBUG ("open");
163
8213266a 164 record_preopen ();
afedecd3
MM
165
166 if (!target_has_execution)
167 error (_("The program is not being run."));
168
169 if (!target_supports_btrace ())
170 error (_("Target does not support branch tracing."));
171
52834460
MM
172 if (non_stop)
173 error (_("Record btrace can't debug inferior in non-stop mode."));
174
afedecd3
MM
175 gdb_assert (record_btrace_thread_observer == NULL);
176
177 disable_chain = make_cleanup (null_cleanup, NULL);
178 ALL_THREADS (tp)
179 if (args == NULL || *args == 0 || number_is_in_list (args, tp->num))
180 {
181 btrace_enable (tp);
182
183 make_cleanup (record_btrace_disable_callback, tp);
184 }
185
186 record_btrace_auto_enable ();
187
188 push_target (&record_btrace_ops);
189
190 observer_notify_record_changed (current_inferior (), 1);
191
192 discard_cleanups (disable_chain);
193}
194
195/* The to_stop_recording method of target record-btrace. */
196
197static void
c6cd7c02 198record_btrace_stop_recording (struct target_ops *self)
afedecd3
MM
199{
200 struct thread_info *tp;
201
202 DEBUG ("stop recording");
203
204 record_btrace_auto_disable ();
205
206 ALL_THREADS (tp)
207 if (tp->btrace.target != NULL)
208 btrace_disable (tp);
209}
210
211/* The to_close method of target record-btrace. */
212
213static void
de90e03d 214record_btrace_close (struct target_ops *self)
afedecd3 215{
568e808b
MM
216 struct thread_info *tp;
217
99c819ee
MM
218 /* Make sure automatic recording gets disabled even if we did not stop
219 recording before closing the record-btrace target. */
220 record_btrace_auto_disable ();
221
568e808b
MM
222 /* We should have already stopped recording.
223 Tear down btrace in case we have not. */
224 ALL_THREADS (tp)
225 btrace_teardown (tp);
afedecd3
MM
226}
227
228/* The to_info_record method of target record-btrace. */
229
230static void
630d6a4a 231record_btrace_info (struct target_ops *self)
afedecd3
MM
232{
233 struct btrace_thread_info *btinfo;
234 struct thread_info *tp;
23a7fe75 235 unsigned int insns, calls;
afedecd3
MM
236
237 DEBUG ("info");
238
239 tp = find_thread_ptid (inferior_ptid);
240 if (tp == NULL)
241 error (_("No thread."));
242
243 btrace_fetch (tp);
244
23a7fe75
MM
245 insns = 0;
246 calls = 0;
247
afedecd3 248 btinfo = &tp->btrace;
6e07b1d2
MM
249
250 if (!btrace_is_empty (tp))
23a7fe75
MM
251 {
252 struct btrace_call_iterator call;
253 struct btrace_insn_iterator insn;
254
255 btrace_call_end (&call, btinfo);
256 btrace_call_prev (&call, 1);
5de9129b 257 calls = btrace_call_number (&call);
23a7fe75
MM
258
259 btrace_insn_end (&insn, btinfo);
260 btrace_insn_prev (&insn, 1);
5de9129b 261 insns = btrace_insn_number (&insn);
23a7fe75 262 }
afedecd3
MM
263
264 printf_unfiltered (_("Recorded %u instructions in %u functions for thread "
23a7fe75 265 "%d (%s).\n"), insns, calls, tp->num,
afedecd3 266 target_pid_to_str (tp->ptid));
07bbe694
MM
267
268 if (btrace_is_replaying (tp))
269 printf_unfiltered (_("Replay in progress. At instruction %u.\n"),
270 btrace_insn_number (btinfo->replay));
afedecd3
MM
271}
272
/* Print an unsigned int VAL into ui_out field FLD.  */

static void
ui_out_field_uint (struct ui_out *uiout, const char *fld, unsigned int val)
{
  ui_out_field_fmt (uiout, fld, "%u", val);
}
280
281/* Disassemble a section of the recorded instruction trace. */
282
283static void
23a7fe75
MM
284btrace_insn_history (struct ui_out *uiout,
285 const struct btrace_insn_iterator *begin,
286 const struct btrace_insn_iterator *end, int flags)
afedecd3
MM
287{
288 struct gdbarch *gdbarch;
23a7fe75 289 struct btrace_insn_iterator it;
afedecd3 290
23a7fe75
MM
291 DEBUG ("itrace (0x%x): [%u; %u)", flags, btrace_insn_number (begin),
292 btrace_insn_number (end));
afedecd3
MM
293
294 gdbarch = target_gdbarch ();
295
23a7fe75 296 for (it = *begin; btrace_insn_cmp (&it, end) != 0; btrace_insn_next (&it, 1))
afedecd3 297 {
23a7fe75
MM
298 const struct btrace_insn *insn;
299
300 insn = btrace_insn_get (&it);
301
afedecd3 302 /* Print the instruction index. */
23a7fe75 303 ui_out_field_uint (uiout, "index", btrace_insn_number (&it));
afedecd3
MM
304 ui_out_text (uiout, "\t");
305
306 /* Disassembly with '/m' flag may not produce the expected result.
307 See PR gdb/11833. */
23a7fe75 308 gdb_disassembly (gdbarch, uiout, NULL, flags, 1, insn->pc, insn->pc + 1);
afedecd3
MM
309 }
310}
311
312/* The to_insn_history method of target record-btrace. */
313
314static void
7a6c5609 315record_btrace_insn_history (struct target_ops *self, int size, int flags)
afedecd3
MM
316{
317 struct btrace_thread_info *btinfo;
23a7fe75
MM
318 struct btrace_insn_history *history;
319 struct btrace_insn_iterator begin, end;
afedecd3
MM
320 struct cleanup *uiout_cleanup;
321 struct ui_out *uiout;
23a7fe75 322 unsigned int context, covered;
afedecd3
MM
323
324 uiout = current_uiout;
325 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
326 "insn history");
afedecd3 327 context = abs (size);
afedecd3
MM
328 if (context == 0)
329 error (_("Bad record instruction-history-size."));
330
23a7fe75
MM
331 btinfo = require_btrace ();
332 history = btinfo->insn_history;
333 if (history == NULL)
afedecd3 334 {
07bbe694 335 struct btrace_insn_iterator *replay;
afedecd3 336
23a7fe75 337 DEBUG ("insn-history (0x%x): %d", flags, size);
afedecd3 338
07bbe694
MM
339 /* If we're replaying, we start at the replay position. Otherwise, we
340 start at the tail of the trace. */
341 replay = btinfo->replay;
342 if (replay != NULL)
343 begin = *replay;
344 else
345 btrace_insn_end (&begin, btinfo);
346
347 /* We start from here and expand in the requested direction. Then we
348 expand in the other direction, as well, to fill up any remaining
349 context. */
350 end = begin;
351 if (size < 0)
352 {
353 /* We want the current position covered, as well. */
354 covered = btrace_insn_next (&end, 1);
355 covered += btrace_insn_prev (&begin, context - covered);
356 covered += btrace_insn_next (&end, context - covered);
357 }
358 else
359 {
360 covered = btrace_insn_next (&end, context);
361 covered += btrace_insn_prev (&begin, context - covered);
362 }
afedecd3
MM
363 }
364 else
365 {
23a7fe75
MM
366 begin = history->begin;
367 end = history->end;
afedecd3 368
23a7fe75
MM
369 DEBUG ("insn-history (0x%x): %d, prev: [%u; %u)", flags, size,
370 btrace_insn_number (&begin), btrace_insn_number (&end));
afedecd3 371
23a7fe75
MM
372 if (size < 0)
373 {
374 end = begin;
375 covered = btrace_insn_prev (&begin, context);
376 }
377 else
378 {
379 begin = end;
380 covered = btrace_insn_next (&end, context);
381 }
afedecd3
MM
382 }
383
23a7fe75
MM
384 if (covered > 0)
385 btrace_insn_history (uiout, &begin, &end, flags);
386 else
387 {
388 if (size < 0)
389 printf_unfiltered (_("At the start of the branch trace record.\n"));
390 else
391 printf_unfiltered (_("At the end of the branch trace record.\n"));
392 }
afedecd3 393
23a7fe75 394 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
395 do_cleanups (uiout_cleanup);
396}
397
398/* The to_insn_history_range method of target record-btrace. */
399
400static void
4e99c6b7
TT
401record_btrace_insn_history_range (struct target_ops *self,
402 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
403{
404 struct btrace_thread_info *btinfo;
23a7fe75
MM
405 struct btrace_insn_history *history;
406 struct btrace_insn_iterator begin, end;
afedecd3
MM
407 struct cleanup *uiout_cleanup;
408 struct ui_out *uiout;
23a7fe75
MM
409 unsigned int low, high;
410 int found;
afedecd3
MM
411
412 uiout = current_uiout;
413 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
414 "insn history");
23a7fe75
MM
415 low = from;
416 high = to;
afedecd3 417
23a7fe75 418 DEBUG ("insn-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
419
420 /* Check for wrap-arounds. */
23a7fe75 421 if (low != from || high != to)
afedecd3
MM
422 error (_("Bad range."));
423
0688d04e 424 if (high < low)
afedecd3
MM
425 error (_("Bad range."));
426
23a7fe75 427 btinfo = require_btrace ();
afedecd3 428
23a7fe75
MM
429 found = btrace_find_insn_by_number (&begin, btinfo, low);
430 if (found == 0)
431 error (_("Range out of bounds."));
afedecd3 432
23a7fe75
MM
433 found = btrace_find_insn_by_number (&end, btinfo, high);
434 if (found == 0)
0688d04e
MM
435 {
436 /* Silently truncate the range. */
437 btrace_insn_end (&end, btinfo);
438 }
439 else
440 {
441 /* We want both begin and end to be inclusive. */
442 btrace_insn_next (&end, 1);
443 }
afedecd3 444
23a7fe75
MM
445 btrace_insn_history (uiout, &begin, &end, flags);
446 btrace_set_insn_history (btinfo, &begin, &end);
afedecd3
MM
447
448 do_cleanups (uiout_cleanup);
449}
450
451/* The to_insn_history_from method of target record-btrace. */
452
453static void
9abc3ff3
TT
454record_btrace_insn_history_from (struct target_ops *self,
455 ULONGEST from, int size, int flags)
afedecd3
MM
456{
457 ULONGEST begin, end, context;
458
459 context = abs (size);
0688d04e
MM
460 if (context == 0)
461 error (_("Bad record instruction-history-size."));
afedecd3
MM
462
463 if (size < 0)
464 {
465 end = from;
466
467 if (from < context)
468 begin = 0;
469 else
0688d04e 470 begin = from - context + 1;
afedecd3
MM
471 }
472 else
473 {
474 begin = from;
0688d04e 475 end = from + context - 1;
afedecd3
MM
476
477 /* Check for wrap-around. */
478 if (end < begin)
479 end = ULONGEST_MAX;
480 }
481
4e99c6b7 482 record_btrace_insn_history_range (self, begin, end, flags);
afedecd3
MM
483}
484
485/* Print the instruction number range for a function call history line. */
486
487static void
23a7fe75
MM
488btrace_call_history_insn_range (struct ui_out *uiout,
489 const struct btrace_function *bfun)
afedecd3 490{
7acbe133
MM
491 unsigned int begin, end, size;
492
493 size = VEC_length (btrace_insn_s, bfun->insn);
494 gdb_assert (size > 0);
afedecd3 495
23a7fe75 496 begin = bfun->insn_offset;
7acbe133 497 end = begin + size - 1;
afedecd3 498
23a7fe75 499 ui_out_field_uint (uiout, "insn begin", begin);
8710b709 500 ui_out_text (uiout, ",");
23a7fe75 501 ui_out_field_uint (uiout, "insn end", end);
afedecd3
MM
502}
503
504/* Print the source line information for a function call history line. */
505
506static void
23a7fe75
MM
507btrace_call_history_src_line (struct ui_out *uiout,
508 const struct btrace_function *bfun)
afedecd3
MM
509{
510 struct symbol *sym;
23a7fe75 511 int begin, end;
afedecd3
MM
512
513 sym = bfun->sym;
514 if (sym == NULL)
515 return;
516
517 ui_out_field_string (uiout, "file",
518 symtab_to_filename_for_display (sym->symtab));
519
23a7fe75
MM
520 begin = bfun->lbegin;
521 end = bfun->lend;
522
523 if (end < begin)
afedecd3
MM
524 return;
525
526 ui_out_text (uiout, ":");
23a7fe75 527 ui_out_field_int (uiout, "min line", begin);
afedecd3 528
23a7fe75 529 if (end == begin)
afedecd3
MM
530 return;
531
8710b709 532 ui_out_text (uiout, ",");
23a7fe75 533 ui_out_field_int (uiout, "max line", end);
afedecd3
MM
534}
535
0b722aec
MM
536/* Get the name of a branch trace function. */
537
538static const char *
539btrace_get_bfun_name (const struct btrace_function *bfun)
540{
541 struct minimal_symbol *msym;
542 struct symbol *sym;
543
544 if (bfun == NULL)
545 return "??";
546
547 msym = bfun->msym;
548 sym = bfun->sym;
549
550 if (sym != NULL)
551 return SYMBOL_PRINT_NAME (sym);
552 else if (msym != NULL)
553 return SYMBOL_PRINT_NAME (msym);
554 else
555 return "??";
556}
557
afedecd3
MM
558/* Disassemble a section of the recorded function trace. */
559
560static void
23a7fe75 561btrace_call_history (struct ui_out *uiout,
8710b709 562 const struct btrace_thread_info *btinfo,
23a7fe75
MM
563 const struct btrace_call_iterator *begin,
564 const struct btrace_call_iterator *end,
afedecd3
MM
565 enum record_print_flag flags)
566{
23a7fe75 567 struct btrace_call_iterator it;
afedecd3 568
23a7fe75
MM
569 DEBUG ("ftrace (0x%x): [%u; %u)", flags, btrace_call_number (begin),
570 btrace_call_number (end));
afedecd3 571
23a7fe75 572 for (it = *begin; btrace_call_cmp (&it, end) < 0; btrace_call_next (&it, 1))
afedecd3 573 {
23a7fe75
MM
574 const struct btrace_function *bfun;
575 struct minimal_symbol *msym;
576 struct symbol *sym;
577
578 bfun = btrace_call_get (&it);
23a7fe75 579 sym = bfun->sym;
0b722aec 580 msym = bfun->msym;
23a7fe75 581
afedecd3 582 /* Print the function index. */
23a7fe75 583 ui_out_field_uint (uiout, "index", bfun->number);
afedecd3
MM
584 ui_out_text (uiout, "\t");
585
8710b709
MM
586 if ((flags & RECORD_PRINT_INDENT_CALLS) != 0)
587 {
588 int level = bfun->level + btinfo->level, i;
589
590 for (i = 0; i < level; ++i)
591 ui_out_text (uiout, " ");
592 }
593
594 if (sym != NULL)
595 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (sym));
596 else if (msym != NULL)
597 ui_out_field_string (uiout, "function", SYMBOL_PRINT_NAME (msym));
598 else if (!ui_out_is_mi_like_p (uiout))
599 ui_out_field_string (uiout, "function", "??");
600
1e038f67 601 if ((flags & RECORD_PRINT_INSN_RANGE) != 0)
afedecd3 602 {
8710b709 603 ui_out_text (uiout, _("\tinst "));
23a7fe75 604 btrace_call_history_insn_range (uiout, bfun);
afedecd3
MM
605 }
606
1e038f67 607 if ((flags & RECORD_PRINT_SRC_LINE) != 0)
afedecd3 608 {
8710b709 609 ui_out_text (uiout, _("\tat "));
23a7fe75 610 btrace_call_history_src_line (uiout, bfun);
afedecd3
MM
611 }
612
afedecd3
MM
613 ui_out_text (uiout, "\n");
614 }
615}
616
617/* The to_call_history method of target record-btrace. */
618
619static void
5df2fcba 620record_btrace_call_history (struct target_ops *self, int size, int flags)
afedecd3
MM
621{
622 struct btrace_thread_info *btinfo;
23a7fe75
MM
623 struct btrace_call_history *history;
624 struct btrace_call_iterator begin, end;
afedecd3
MM
625 struct cleanup *uiout_cleanup;
626 struct ui_out *uiout;
23a7fe75 627 unsigned int context, covered;
afedecd3
MM
628
629 uiout = current_uiout;
630 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
631 "insn history");
afedecd3 632 context = abs (size);
afedecd3
MM
633 if (context == 0)
634 error (_("Bad record function-call-history-size."));
635
23a7fe75
MM
636 btinfo = require_btrace ();
637 history = btinfo->call_history;
638 if (history == NULL)
afedecd3 639 {
07bbe694 640 struct btrace_insn_iterator *replay;
afedecd3 641
23a7fe75 642 DEBUG ("call-history (0x%x): %d", flags, size);
afedecd3 643
07bbe694
MM
644 /* If we're replaying, we start at the replay position. Otherwise, we
645 start at the tail of the trace. */
646 replay = btinfo->replay;
647 if (replay != NULL)
648 {
649 begin.function = replay->function;
650 begin.btinfo = btinfo;
651 }
652 else
653 btrace_call_end (&begin, btinfo);
654
655 /* We start from here and expand in the requested direction. Then we
656 expand in the other direction, as well, to fill up any remaining
657 context. */
658 end = begin;
659 if (size < 0)
660 {
661 /* We want the current position covered, as well. */
662 covered = btrace_call_next (&end, 1);
663 covered += btrace_call_prev (&begin, context - covered);
664 covered += btrace_call_next (&end, context - covered);
665 }
666 else
667 {
668 covered = btrace_call_next (&end, context);
669 covered += btrace_call_prev (&begin, context- covered);
670 }
afedecd3
MM
671 }
672 else
673 {
23a7fe75
MM
674 begin = history->begin;
675 end = history->end;
afedecd3 676
23a7fe75
MM
677 DEBUG ("call-history (0x%x): %d, prev: [%u; %u)", flags, size,
678 btrace_call_number (&begin), btrace_call_number (&end));
afedecd3 679
23a7fe75
MM
680 if (size < 0)
681 {
682 end = begin;
683 covered = btrace_call_prev (&begin, context);
684 }
685 else
686 {
687 begin = end;
688 covered = btrace_call_next (&end, context);
689 }
afedecd3
MM
690 }
691
23a7fe75 692 if (covered > 0)
8710b709 693 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75
MM
694 else
695 {
696 if (size < 0)
697 printf_unfiltered (_("At the start of the branch trace record.\n"));
698 else
699 printf_unfiltered (_("At the end of the branch trace record.\n"));
700 }
afedecd3 701
23a7fe75 702 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
703 do_cleanups (uiout_cleanup);
704}
705
706/* The to_call_history_range method of target record-btrace. */
707
708static void
f0d960ea
TT
709record_btrace_call_history_range (struct target_ops *self,
710 ULONGEST from, ULONGEST to, int flags)
afedecd3
MM
711{
712 struct btrace_thread_info *btinfo;
23a7fe75
MM
713 struct btrace_call_history *history;
714 struct btrace_call_iterator begin, end;
afedecd3
MM
715 struct cleanup *uiout_cleanup;
716 struct ui_out *uiout;
23a7fe75
MM
717 unsigned int low, high;
718 int found;
afedecd3
MM
719
720 uiout = current_uiout;
721 uiout_cleanup = make_cleanup_ui_out_tuple_begin_end (uiout,
722 "func history");
23a7fe75
MM
723 low = from;
724 high = to;
afedecd3 725
23a7fe75 726 DEBUG ("call-history (0x%x): [%u; %u)", flags, low, high);
afedecd3
MM
727
728 /* Check for wrap-arounds. */
23a7fe75 729 if (low != from || high != to)
afedecd3
MM
730 error (_("Bad range."));
731
0688d04e 732 if (high < low)
afedecd3
MM
733 error (_("Bad range."));
734
23a7fe75 735 btinfo = require_btrace ();
afedecd3 736
23a7fe75
MM
737 found = btrace_find_call_by_number (&begin, btinfo, low);
738 if (found == 0)
739 error (_("Range out of bounds."));
afedecd3 740
23a7fe75
MM
741 found = btrace_find_call_by_number (&end, btinfo, high);
742 if (found == 0)
0688d04e
MM
743 {
744 /* Silently truncate the range. */
745 btrace_call_end (&end, btinfo);
746 }
747 else
748 {
749 /* We want both begin and end to be inclusive. */
750 btrace_call_next (&end, 1);
751 }
afedecd3 752
8710b709 753 btrace_call_history (uiout, btinfo, &begin, &end, flags);
23a7fe75 754 btrace_set_call_history (btinfo, &begin, &end);
afedecd3
MM
755
756 do_cleanups (uiout_cleanup);
757}
758
759/* The to_call_history_from method of target record-btrace. */
760
761static void
ec0aea04
TT
762record_btrace_call_history_from (struct target_ops *self,
763 ULONGEST from, int size, int flags)
afedecd3
MM
764{
765 ULONGEST begin, end, context;
766
767 context = abs (size);
0688d04e
MM
768 if (context == 0)
769 error (_("Bad record function-call-history-size."));
afedecd3
MM
770
771 if (size < 0)
772 {
773 end = from;
774
775 if (from < context)
776 begin = 0;
777 else
0688d04e 778 begin = from - context + 1;
afedecd3
MM
779 }
780 else
781 {
782 begin = from;
0688d04e 783 end = from + context - 1;
afedecd3
MM
784
785 /* Check for wrap-around. */
786 if (end < begin)
787 end = ULONGEST_MAX;
788 }
789
f0d960ea 790 record_btrace_call_history_range (self, begin, end, flags);
afedecd3
MM
791}
792
07bbe694
MM
793/* The to_record_is_replaying method of target record-btrace. */
794
795static int
1c63c994 796record_btrace_is_replaying (struct target_ops *self)
07bbe694
MM
797{
798 struct thread_info *tp;
799
800 ALL_THREADS (tp)
801 if (btrace_is_replaying (tp))
802 return 1;
803
804 return 0;
805}
806
633785ff
MM
807/* The to_xfer_partial method of target record-btrace. */
808
9b409511 809static enum target_xfer_status
633785ff
MM
810record_btrace_xfer_partial (struct target_ops *ops, enum target_object object,
811 const char *annex, gdb_byte *readbuf,
812 const gdb_byte *writebuf, ULONGEST offset,
9b409511 813 ULONGEST len, ULONGEST *xfered_len)
633785ff
MM
814{
815 struct target_ops *t;
816
817 /* Filter out requests that don't make sense during replay. */
1c63c994 818 if (!record_btrace_allow_memory_access && record_btrace_is_replaying (ops))
633785ff
MM
819 {
820 switch (object)
821 {
822 case TARGET_OBJECT_MEMORY:
823 {
824 struct target_section *section;
825
826 /* We do not allow writing memory in general. */
827 if (writebuf != NULL)
9b409511
YQ
828 {
829 *xfered_len = len;
830 return TARGET_XFER_E_UNAVAILABLE;
831 }
633785ff
MM
832
833 /* We allow reading readonly memory. */
834 section = target_section_by_addr (ops, offset);
835 if (section != NULL)
836 {
837 /* Check if the section we found is readonly. */
838 if ((bfd_get_section_flags (section->the_bfd_section->owner,
839 section->the_bfd_section)
840 & SEC_READONLY) != 0)
841 {
842 /* Truncate the request to fit into this section. */
843 len = min (len, section->endaddr - offset);
844 break;
845 }
846 }
847
9b409511 848 *xfered_len = len;
633785ff
MM
849 return TARGET_XFER_E_UNAVAILABLE;
850 }
851 }
852 }
853
854 /* Forward the request. */
855 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
856 if (ops->to_xfer_partial != NULL)
857 return ops->to_xfer_partial (ops, object, annex, readbuf, writebuf,
9b409511 858 offset, len, xfered_len);
633785ff 859
9b409511 860 *xfered_len = len;
633785ff
MM
861 return TARGET_XFER_E_UNAVAILABLE;
862}
863
864/* The to_insert_breakpoint method of target record-btrace. */
865
866static int
867record_btrace_insert_breakpoint (struct target_ops *ops,
868 struct gdbarch *gdbarch,
869 struct bp_target_info *bp_tgt)
870{
871 volatile struct gdb_exception except;
872 int old, ret;
873
874 /* Inserting breakpoints requires accessing memory. Allow it for the
875 duration of this function. */
876 old = record_btrace_allow_memory_access;
877 record_btrace_allow_memory_access = 1;
878
879 ret = 0;
880 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 881 ret = ops->beneath->to_insert_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff
MM
882
883 record_btrace_allow_memory_access = old;
884
885 if (except.reason < 0)
886 throw_exception (except);
887
888 return ret;
889}
890
891/* The to_remove_breakpoint method of target record-btrace. */
892
893static int
894record_btrace_remove_breakpoint (struct target_ops *ops,
895 struct gdbarch *gdbarch,
896 struct bp_target_info *bp_tgt)
897{
898 volatile struct gdb_exception except;
899 int old, ret;
900
901 /* Removing breakpoints requires accessing memory. Allow it for the
902 duration of this function. */
903 old = record_btrace_allow_memory_access;
904 record_btrace_allow_memory_access = 1;
905
906 ret = 0;
907 TRY_CATCH (except, RETURN_MASK_ALL)
6b84065d 908 ret = ops->beneath->to_remove_breakpoint (ops->beneath, gdbarch, bp_tgt);
633785ff
MM
909
910 record_btrace_allow_memory_access = old;
911
912 if (except.reason < 0)
913 throw_exception (except);
914
915 return ret;
916}
917
1f3ef581
MM
918/* The to_fetch_registers method of target record-btrace. */
919
920static void
921record_btrace_fetch_registers (struct target_ops *ops,
922 struct regcache *regcache, int regno)
923{
924 struct btrace_insn_iterator *replay;
925 struct thread_info *tp;
926
927 tp = find_thread_ptid (inferior_ptid);
928 gdb_assert (tp != NULL);
929
930 replay = tp->btrace.replay;
931 if (replay != NULL)
932 {
933 const struct btrace_insn *insn;
934 struct gdbarch *gdbarch;
935 int pcreg;
936
937 gdbarch = get_regcache_arch (regcache);
938 pcreg = gdbarch_pc_regnum (gdbarch);
939 if (pcreg < 0)
940 return;
941
942 /* We can only provide the PC register. */
943 if (regno >= 0 && regno != pcreg)
944 return;
945
946 insn = btrace_insn_get (replay);
947 gdb_assert (insn != NULL);
948
949 regcache_raw_supply (regcache, regno, &insn->pc);
950 }
951 else
952 {
953 struct target_ops *t;
954
955 for (t = ops->beneath; t != NULL; t = t->beneath)
956 if (t->to_fetch_registers != NULL)
957 {
958 t->to_fetch_registers (t, regcache, regno);
959 break;
960 }
961 }
962}
963
964/* The to_store_registers method of target record-btrace. */
965
966static void
967record_btrace_store_registers (struct target_ops *ops,
968 struct regcache *regcache, int regno)
969{
970 struct target_ops *t;
971
1c63c994 972 if (record_btrace_is_replaying (ops))
1f3ef581
MM
973 error (_("This record target does not allow writing registers."));
974
975 gdb_assert (may_write_registers != 0);
976
977 for (t = ops->beneath; t != NULL; t = t->beneath)
978 if (t->to_store_registers != NULL)
979 {
980 t->to_store_registers (t, regcache, regno);
981 return;
982 }
983
984 noprocess ();
985}
986
987/* The to_prepare_to_store method of target record-btrace. */
988
989static void
990record_btrace_prepare_to_store (struct target_ops *ops,
991 struct regcache *regcache)
992{
993 struct target_ops *t;
994
1c63c994 995 if (record_btrace_is_replaying (ops))
1f3ef581
MM
996 return;
997
998 for (t = ops->beneath; t != NULL; t = t->beneath)
999 if (t->to_prepare_to_store != NULL)
1000 {
1001 t->to_prepare_to_store (t, regcache);
1002 return;
1003 }
1004}
1005
0b722aec
MM
1006/* The branch trace frame cache. */
1007
1008struct btrace_frame_cache
1009{
1010 /* The thread. */
1011 struct thread_info *tp;
1012
1013 /* The frame info. */
1014 struct frame_info *frame;
1015
1016 /* The branch trace function segment. */
1017 const struct btrace_function *bfun;
1018};
1019
1020/* A struct btrace_frame_cache hash table indexed by NEXT. */
1021
1022static htab_t bfcache;
1023
1024/* hash_f for htab_create_alloc of bfcache. */
1025
1026static hashval_t
1027bfcache_hash (const void *arg)
1028{
1029 const struct btrace_frame_cache *cache = arg;
1030
1031 return htab_hash_pointer (cache->frame);
1032}
1033
1034/* eq_f for htab_create_alloc of bfcache. */
1035
1036static int
1037bfcache_eq (const void *arg1, const void *arg2)
1038{
1039 const struct btrace_frame_cache *cache1 = arg1;
1040 const struct btrace_frame_cache *cache2 = arg2;
1041
1042 return cache1->frame == cache2->frame;
1043}
1044
1045/* Create a new btrace frame cache. */
1046
1047static struct btrace_frame_cache *
1048bfcache_new (struct frame_info *frame)
1049{
1050 struct btrace_frame_cache *cache;
1051 void **slot;
1052
1053 cache = FRAME_OBSTACK_ZALLOC (struct btrace_frame_cache);
1054 cache->frame = frame;
1055
1056 slot = htab_find_slot (bfcache, cache, INSERT);
1057 gdb_assert (*slot == NULL);
1058 *slot = cache;
1059
1060 return cache;
1061}
1062
1063/* Extract the branch trace function from a branch trace frame. */
1064
1065static const struct btrace_function *
1066btrace_get_frame_function (struct frame_info *frame)
1067{
1068 const struct btrace_frame_cache *cache;
1069 const struct btrace_function *bfun;
1070 struct btrace_frame_cache pattern;
1071 void **slot;
1072
1073 pattern.frame = frame;
1074
1075 slot = htab_find_slot (bfcache, &pattern, NO_INSERT);
1076 if (slot == NULL)
1077 return NULL;
1078
1079 cache = *slot;
1080 return cache->bfun;
1081}
1082
cecac1ab
MM
1083/* Implement stop_reason method for record_btrace_frame_unwind. */
1084
1085static enum unwind_stop_reason
1086record_btrace_frame_unwind_stop_reason (struct frame_info *this_frame,
1087 void **this_cache)
1088{
0b722aec
MM
1089 const struct btrace_frame_cache *cache;
1090 const struct btrace_function *bfun;
1091
1092 cache = *this_cache;
1093 bfun = cache->bfun;
1094 gdb_assert (bfun != NULL);
1095
1096 if (bfun->up == NULL)
1097 return UNWIND_UNAVAILABLE;
1098
1099 return UNWIND_NO_REASON;
cecac1ab
MM
1100}
1101
1102/* Implement this_id method for record_btrace_frame_unwind. */
1103
1104static void
1105record_btrace_frame_this_id (struct frame_info *this_frame, void **this_cache,
1106 struct frame_id *this_id)
1107{
0b722aec
MM
1108 const struct btrace_frame_cache *cache;
1109 const struct btrace_function *bfun;
1110 CORE_ADDR code, special;
1111
1112 cache = *this_cache;
1113
1114 bfun = cache->bfun;
1115 gdb_assert (bfun != NULL);
1116
1117 while (bfun->segment.prev != NULL)
1118 bfun = bfun->segment.prev;
1119
1120 code = get_frame_func (this_frame);
1121 special = bfun->number;
1122
1123 *this_id = frame_id_build_unavailable_stack_special (code, special);
1124
1125 DEBUG ("[frame] %s id: (!stack, pc=%s, special=%s)",
1126 btrace_get_bfun_name (cache->bfun),
1127 core_addr_to_string_nz (this_id->code_addr),
1128 core_addr_to_string_nz (this_id->special_addr));
cecac1ab
MM
1129}
1130
/* Implement prev_register method for record_btrace_frame_unwind.
   Only the PC register can be reconstructed from the recorded control
   flow; any other register throws NOT_AVAILABLE_ERROR.  */

static struct value *
record_btrace_frame_prev_register (struct frame_info *this_frame,
				   void **this_cache,
				   int regnum)
{
  const struct btrace_frame_cache *cache;
  const struct btrace_function *bfun, *caller;
  const struct btrace_insn *insn;
  struct gdbarch *gdbarch;
  CORE_ADDR pc;
  int pcreg;

  /* Branch trace does not record register contents; reject everything
     except the architecture's PC register.  */
  gdbarch = get_frame_arch (this_frame);
  pcreg = gdbarch_pc_regnum (gdbarch);
  if (pcreg < 0 || regnum != pcreg)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("Registers are not available in btrace record history"));

  cache = *this_cache;
  bfun = cache->bfun;
  gdb_assert (bfun != NULL);

  /* Without a caller there is no frame to unwind the PC from.  */
  caller = bfun->up;
  if (caller == NULL)
    throw_error (NOT_AVAILABLE_ERROR,
		 _("No caller in btrace record history"));

  if ((bfun->flags & BFUN_UP_LINKS_TO_RET) != 0)
    {
      /* The up link is a return: the unwound PC is the first instruction
	 of the caller's segment.  */
      insn = VEC_index (btrace_insn_s, caller->insn, 0);
      pc = insn->pc;
    }
  else
    {
      /* The up link is a call: the unwound PC is the instruction after
	 the call, i.e. the caller's last instruction plus its length.  */
      insn = VEC_last (btrace_insn_s, caller->insn);
      pc = insn->pc;

      pc += gdb_insn_length (gdbarch, pc);
    }

  DEBUG ("[frame] unwound PC in %s on level %d: %s",
	 btrace_get_bfun_name (bfun), bfun->level,
	 core_addr_to_string_nz (pc));

  return frame_unwind_got_address (this_frame, regnum, pc);
}
1179
/* Implement sniffer method for record_btrace_frame_unwind.
   Returns 1 and fills *THIS_CACHE if THIS_FRAME belongs to the replayed
   btrace history, 0 otherwise.  */

static int
record_btrace_frame_sniffer (const struct frame_unwind *self,
			     struct frame_info *this_frame,
			     void **this_cache)
{
  const struct btrace_function *bfun;
  struct btrace_frame_cache *cache;
  struct thread_info *tp;
  struct frame_info *next;

  /* THIS_FRAME does not contain a reference to its thread.  */
  tp = find_thread_ptid (inferior_ptid);
  gdb_assert (tp != NULL);

  bfun = NULL;
  next = get_next_frame (this_frame);
  if (next == NULL)
    {
      /* Innermost frame: it corresponds to the current replay position,
	 if we are replaying at all.  */
      const struct btrace_insn_iterator *replay;

      replay = tp->btrace.replay;
      if (replay != NULL)
	bfun = replay->function;
    }
  else
    {
      /* Outer frame: follow the callee's up link.  Tail-call callees are
	 left to record_btrace_tailcall_frame_sniffer instead.  */
      const struct btrace_function *callee;

      callee = btrace_get_frame_function (next);
      if (callee != NULL && (callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
	bfun = callee->up;
    }

  if (bfun == NULL)
    return 0;

  DEBUG ("[frame] sniffed frame for %s on level %d",
	 btrace_get_bfun_name (bfun), bfun->level);

  /* This is our frame.  Initialize the frame cache.  */
  cache = bfcache_new (this_frame);
  cache->tp = tp;
  cache->bfun = bfun;

  *this_cache = cache;
  return 1;
}
1229
1230/* Implement sniffer method for record_btrace_tailcall_frame_unwind. */
1231
1232static int
1233record_btrace_tailcall_frame_sniffer (const struct frame_unwind *self,
1234 struct frame_info *this_frame,
1235 void **this_cache)
1236{
1237 const struct btrace_function *bfun, *callee;
1238 struct btrace_frame_cache *cache;
1239 struct frame_info *next;
1240
1241 next = get_next_frame (this_frame);
1242 if (next == NULL)
1243 return 0;
1244
1245 callee = btrace_get_frame_function (next);
1246 if (callee == NULL)
1247 return 0;
1248
1249 if ((callee->flags & BFUN_UP_LINKS_TO_TAILCALL) == 0)
1250 return 0;
1251
1252 bfun = callee->up;
1253 if (bfun == NULL)
1254 return 0;
1255
1256 DEBUG ("[frame] sniffed tailcall frame for %s on level %d",
1257 btrace_get_bfun_name (bfun), bfun->level);
1258
1259 /* This is our frame. Initialize the frame cache. */
1260 cache = bfcache_new (this_frame);
1261 cache->tp = find_thread_ptid (inferior_ptid);
1262 cache->bfun = bfun;
1263
1264 *this_cache = cache;
1265 return 1;
1266}
1267
1268static void
1269record_btrace_frame_dealloc_cache (struct frame_info *self, void *this_cache)
1270{
1271 struct btrace_frame_cache *cache;
1272 void **slot;
1273
1274 cache = this_cache;
1275
1276 slot = htab_find_slot (bfcache, cache, NO_INSERT);
1277 gdb_assert (slot != NULL);
1278
1279 htab_remove_elt (bfcache, cache);
cecac1ab
MM
1280}
1281
/* btrace recording does not store previous memory content, neither the stack
   frames content.  Any unwinding would return erroneous results as the stack
   contents no longer matches the changed PC value restored from history.
   Therefore this unwinder reports any possibly unwound registers as
   <unavailable>.  */

const struct frame_unwind record_btrace_frame_unwind =
{
  NORMAL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_frame_sniffer,
  record_btrace_frame_dealloc_cache
};

/* The tail-call variant: differs from the unwinder above only in the frame
   type and in its sniffer, which claims callees linked via tail calls.  */

const struct frame_unwind record_btrace_tailcall_frame_unwind =
{
  TAILCALL_FRAME,
  record_btrace_frame_unwind_stop_reason,
  record_btrace_frame_this_id,
  record_btrace_frame_prev_register,
  NULL,
  record_btrace_tailcall_frame_sniffer,
  record_btrace_frame_dealloc_cache
};
b2f4cfde 1309
52834460
MM
1310/* Indicate that TP should be resumed according to FLAG. */
1311
1312static void
1313record_btrace_resume_thread (struct thread_info *tp,
1314 enum btrace_thread_flag flag)
1315{
1316 struct btrace_thread_info *btinfo;
1317
1318 DEBUG ("resuming %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flag);
1319
1320 btinfo = &tp->btrace;
1321
1322 if ((btinfo->flags & BTHR_MOVE) != 0)
1323 error (_("Thread already moving."));
1324
1325 /* Fetch the latest branch trace. */
1326 btrace_fetch (tp);
1327
1328 btinfo->flags |= flag;
1329}
1330
1331/* Find the thread to resume given a PTID. */
1332
1333static struct thread_info *
1334record_btrace_find_resume_thread (ptid_t ptid)
1335{
1336 struct thread_info *tp;
1337
1338 /* When asked to resume everything, we pick the current thread. */
1339 if (ptid_equal (minus_one_ptid, ptid) || ptid_is_pid (ptid))
1340 ptid = inferior_ptid;
1341
1342 return find_thread_ptid (ptid);
1343}
1344
/* Start replaying a thread.  Returns the new replay iterator (also stored
   in TP's btrace info), or NULL if the thread has no trace.  */

static struct btrace_insn_iterator *
record_btrace_start_replaying (struct thread_info *tp)
{
  volatile struct gdb_exception except;
  struct btrace_insn_iterator *replay;
  struct btrace_thread_info *btinfo;
  int executing;

  btinfo = &tp->btrace;
  replay = NULL;

  /* We can't start replaying without trace.  */
  if (btinfo->begin == NULL)
    return NULL;

  /* Clear the executing flag to allow changes to the current frame.
     We are not actually running, yet.  We just started a reverse execution
     command or a record goto command.
     For the latter, EXECUTING is false and this has no effect.
     For the former, EXECUTING is true and we're in to_wait, about to
     move the thread.  Since we need to recompute the stack, we temporarily
     set EXECUTING to false.  */
  executing = is_executing (tp->ptid);
  set_executing (tp->ptid, 0);

  /* GDB stores the current frame_id when stepping in order to detect steps
     into subroutines.
     Since frames are computed differently when we're replaying, we need to
     recompute those stored frames and fix them up so we can still detect
     subroutines after we started replaying.  */
  TRY_CATCH (except, RETURN_MASK_ALL)
    {
      struct frame_info *frame;
      struct frame_id frame_id;
      int upd_step_frame_id, upd_step_stack_frame_id;

      /* The current frame without replaying - computed via normal unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Check if we need to update any stepping-related frame id's.  */
      upd_step_frame_id = frame_id_eq (frame_id,
				       tp->control.step_frame_id);
      upd_step_stack_frame_id = frame_id_eq (frame_id,
					     tp->control.step_stack_frame_id);

      /* We start replaying at the end of the branch trace.  This corresponds
	 to the current instruction.  */
      replay = xmalloc (sizeof (*replay));
      btrace_insn_end (replay, btinfo);

      /* We're not replaying, yet.  */
      gdb_assert (btinfo->replay == NULL);
      btinfo->replay = replay;

      /* Make sure we're not using any stale registers.  */
      registers_changed_ptid (tp->ptid);

      /* The current frame with replaying - computed via btrace unwind.  */
      frame = get_current_frame ();
      frame_id = get_frame_id (frame);

      /* Replace stepping related frames where necessary.  */
      if (upd_step_frame_id)
	tp->control.step_frame_id = frame_id;
      if (upd_step_stack_frame_id)
	tp->control.step_stack_frame_id = frame_id;
    }

  /* Restore the previous execution state.  */
  set_executing (tp->ptid, executing);

  /* On error, undo the partial replay setup before re-throwing.  */
  if (except.reason < 0)
    {
      xfree (btinfo->replay);
      btinfo->replay = NULL;

      registers_changed_ptid (tp->ptid);

      throw_exception (except);
    }

  return replay;
}
1431
1432/* Stop replaying a thread. */
1433
1434static void
1435record_btrace_stop_replaying (struct thread_info *tp)
1436{
1437 struct btrace_thread_info *btinfo;
1438
1439 btinfo = &tp->btrace;
1440
1441 xfree (btinfo->replay);
1442 btinfo->replay = NULL;
1443
1444 /* Make sure we're not leaving any stale registers. */
1445 registers_changed_ptid (tp->ptid);
1446}
1447
b2f4cfde
MM
/* The to_resume method of target record-btrace.  */

static void
record_btrace_resume (struct target_ops *ops, ptid_t ptid, int step,
		      enum gdb_signal signal)
{
  struct thread_info *tp, *other;
  enum btrace_thread_flag flag;

  DEBUG ("resume %s: %s", target_pid_to_str (ptid), step ? "step" : "cont");

  tp = record_btrace_find_resume_thread (ptid);
  if (tp == NULL)
    error (_("Cannot find thread to resume."));

  /* Stop replaying other threads if the thread to resume is not replaying.  */
  if (!btrace_is_replaying (tp) && execution_direction != EXEC_REVERSE)
    ALL_THREADS (other)
      record_btrace_stop_replaying (other);

  /* As long as we're not replaying, just forward the request to the first
     target beneath that implements to_resume.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_resume != NULL)
	  return ops->to_resume (ops, ptid, step, signal);

      error (_("Cannot find target for stepping."));
    }

  /* Compute the btrace thread flag for the requested move.  */
  if (step == 0)
    flag = execution_direction == EXEC_REVERSE ? BTHR_RCONT : BTHR_CONT;
  else
    flag = execution_direction == EXEC_REVERSE ? BTHR_RSTEP : BTHR_STEP;

  /* At the moment, we only move a single thread.  We could also move
     all threads in parallel by single-stepping each resumed thread
     until the first runs into an event.
     When we do that, we would want to continue all other threads.
     For now, just resume one thread to not confuse to_wait.  */
  record_btrace_resume_thread (tp, flag);

  /* We just indicate the resume intent here.  The actual stepping happens in
     record_btrace_wait below.  */
}
1494
1495/* Find a thread to move. */
1496
1497static struct thread_info *
1498record_btrace_find_thread_to_move (ptid_t ptid)
1499{
1500 struct thread_info *tp;
1501
1502 /* First check the parameter thread. */
1503 tp = find_thread_ptid (ptid);
1504 if (tp != NULL && (tp->btrace.flags & BTHR_MOVE) != 0)
1505 return tp;
1506
1507 /* Otherwise, find one other thread that has been resumed. */
1508 ALL_THREADS (tp)
1509 if ((tp->btrace.flags & BTHR_MOVE) != 0)
1510 return tp;
1511
1512 return NULL;
1513}
1514
1515/* Return a target_waitstatus indicating that we ran out of history. */
1516
1517static struct target_waitstatus
1518btrace_step_no_history (void)
1519{
1520 struct target_waitstatus status;
1521
1522 status.kind = TARGET_WAITKIND_NO_HISTORY;
1523
1524 return status;
1525}
1526
1527/* Return a target_waitstatus indicating that a step finished. */
1528
1529static struct target_waitstatus
1530btrace_step_stopped (void)
1531{
1532 struct target_waitstatus status;
1533
1534 status.kind = TARGET_WAITKIND_STOPPED;
1535 status.value.sig = GDB_SIGNAL_TRAP;
1536
1537 return status;
1538}
1539
1540/* Clear the record histories. */
1541
1542static void
1543record_btrace_clear_histories (struct btrace_thread_info *btinfo)
1544{
1545 xfree (btinfo->insn_history);
1546 xfree (btinfo->call_history);
1547
1548 btinfo->insn_history = NULL;
1549 btinfo->call_history = NULL;
1550}
1551
/* Step a single thread.  Consumes TP's pending BTHR_MOVE request and
   returns the resulting wait status.  */

static struct target_waitstatus
record_btrace_step_thread (struct thread_info *tp)
{
  struct btrace_insn_iterator *replay, end;
  struct btrace_thread_info *btinfo;
  struct address_space *aspace;
  struct inferior *inf;
  enum btrace_thread_flag flags;
  unsigned int steps;

  btinfo = &tp->btrace;
  replay = btinfo->replay;

  /* Extract and clear the move request; each request is serviced once.  */
  flags = btinfo->flags & BTHR_MOVE;
  btinfo->flags &= ~BTHR_MOVE;

  DEBUG ("stepping %d (%s): %u", tp->num, target_pid_to_str (tp->ptid), flags);

  switch (flags)
    {
    default:
      internal_error (__FILE__, __LINE__, _("invalid stepping type."));

    case BTHR_STEP:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      /* We are always able to step at least once.  */
      steps = btrace_insn_next (replay, 1);
      gdb_assert (steps == 1);

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* We stop replaying if we reached the end of the trace.  */
      if (btrace_insn_cmp (replay, &end) == 0)
	record_btrace_stop_replaying (tp);

      return btrace_step_stopped ();

    case BTHR_RSTEP:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      /* If we can't step any further, we reached the end of the history.  */
      steps = btrace_insn_prev (replay, 1);
      if (steps == 0)
	return btrace_step_no_history ();

      return btrace_step_stopped ();

    case BTHR_CONT:
      /* We're done if we're not replaying.  */
      if (replay == NULL)
	return btrace_step_no_history ();

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Determine the end of the instruction trace.  */
      btrace_insn_end (&end, btinfo);

      /* Step forward until we either reach the end of the trace or hit a
	 breakpoint; both cases return out of the loop.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* We are always able to step at least once.  */
	  steps = btrace_insn_next (replay, 1);
	  gdb_assert (steps == 1);

	  /* We stop replaying if we reached the end of the trace.  */
	  if (btrace_insn_cmp (replay, &end) == 0)
	    {
	      record_btrace_stop_replaying (tp);
	      return btrace_step_no_history ();
	    }

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}

    case BTHR_RCONT:
      /* Start replaying if we're not already doing so.  */
      if (replay == NULL)
	replay = record_btrace_start_replaying (tp);

      inf = find_inferior_pid (ptid_get_pid (tp->ptid));
      aspace = inf->aspace;

      /* Step backward until we either run out of history or hit a
	 breakpoint; both cases return out of the loop.  */
      for (;;)
	{
	  const struct btrace_insn *insn;

	  /* If we can't step any further, we're done.  */
	  steps = btrace_insn_prev (replay, 1);
	  if (steps == 0)
	    return btrace_step_no_history ();

	  insn = btrace_insn_get (replay);
	  gdb_assert (insn);

	  DEBUG ("reverse-stepping %d (%s) ... %s", tp->num,
		 target_pid_to_str (tp->ptid),
		 core_addr_to_string_nz (insn->pc));

	  if (breakpoint_here_p (aspace, insn->pc))
	    return btrace_step_stopped ();
	}
    }
}
1673
/* The to_wait method of target record-btrace.  */

static ptid_t
record_btrace_wait (struct target_ops *ops, ptid_t ptid,
		    struct target_waitstatus *status, int options)
{
  struct thread_info *tp, *other;

  DEBUG ("wait %s (0x%x)", target_pid_to_str (ptid), options);

  /* As long as we're not replaying, just forward the request to the first
     target beneath that implements to_wait.  */
  if (!record_btrace_is_replaying (ops) && execution_direction != EXEC_REVERSE)
    {
      for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
	if (ops->to_wait != NULL)
	  return ops->to_wait (ops, ptid, status, options);

      error (_("Cannot find target for waiting."));
    }

  /* Let's find a thread to move.  */
  tp = record_btrace_find_thread_to_move (ptid);
  if (tp == NULL)
    {
      DEBUG ("wait %s: no thread", target_pid_to_str (ptid));

      /* Nothing to step; tell the core to ask again.  */
      status->kind = TARGET_WAITKIND_IGNORE;
      return minus_one_ptid;
    }

  /* We only move a single thread.  We're not able to correlate threads.  */
  *status = record_btrace_step_thread (tp);

  /* Stop all other threads by clearing their pending move requests.  */
  if (!non_stop)
    ALL_THREADS (other)
      other->btrace.flags &= ~BTHR_MOVE;

  /* Start record histories anew from the current position.  */
  record_btrace_clear_histories (&tp->btrace);

  /* We moved the replay position but did not update registers.  */
  registers_changed_ptid (tp->ptid);

  return tp->ptid;
}
1720
/* The to_can_execute_reverse method of target record-btrace.  */

static int
record_btrace_can_execute_reverse (struct target_ops *self)
{
  /* Replaying the recorded history always supports reverse execution.  */
  return 1;
}
1728
/* The to_decr_pc_after_break method of target record-btrace.  */

static CORE_ADDR
record_btrace_decr_pc_after_break (struct target_ops *ops,
				   struct gdbarch *gdbarch)
{
  /* When replaying, we do not actually execute the breakpoint instruction
     so there is no need to adjust the PC after hitting a breakpoint.  */
  if (record_btrace_is_replaying (ops))
    return 0;

  /* Otherwise defer to the target beneath.  */
  return ops->beneath->to_decr_pc_after_break (ops->beneath, gdbarch);
}
1742
e2887aa3
MM
1743/* The to_find_new_threads method of target record-btrace. */
1744
1745static void
1746record_btrace_find_new_threads (struct target_ops *ops)
1747{
1748 /* Don't expect new threads if we're replaying. */
1c63c994 1749 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1750 return;
1751
1752 /* Forward the request. */
1753 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1754 if (ops->to_find_new_threads != NULL)
1755 {
1756 ops->to_find_new_threads (ops);
1757 break;
1758 }
1759}
1760
1761/* The to_thread_alive method of target record-btrace. */
1762
1763static int
1764record_btrace_thread_alive (struct target_ops *ops, ptid_t ptid)
1765{
1766 /* We don't add or remove threads during replay. */
1c63c994 1767 if (record_btrace_is_replaying (ops))
e2887aa3
MM
1768 return find_thread_ptid (ptid) != NULL;
1769
1770 /* Forward the request. */
1771 for (ops = ops->beneath; ops != NULL; ops = ops->beneath)
1772 if (ops->to_thread_alive != NULL)
1773 return ops->to_thread_alive (ops, ptid);
1774
1775 return 0;
1776}
1777
066ce621
MM
/* Set the replay branch trace instruction iterator.  If IT is NULL, replay
   is stopped.  */

static void
record_btrace_set_replay (struct thread_info *tp,
			  const struct btrace_insn_iterator *it)
{
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (it == NULL || it->function == NULL)
    record_btrace_stop_replaying (tp);
  else
    {
      /* Allocate the replay iterator if necessary; afterwards
	 BTINFO->REPLAY is non-NULL and can be overwritten below.  */
      if (btinfo->replay == NULL)
	record_btrace_start_replaying (tp);
      else if (btrace_insn_cmp (btinfo->replay, it) == 0)
	/* Already at the requested position; nothing to do.  */
	return;

      *btinfo->replay = *it;

      /* The replay position changed; cached registers are stale.  */
      registers_changed_ptid (tp->ptid);
    }

  /* Start anew from the new replay position.  */
  record_btrace_clear_histories (btinfo);
}
1805
1806/* The to_goto_record_begin method of target record-btrace. */
1807
1808static void
08475817 1809record_btrace_goto_begin (struct target_ops *self)
066ce621
MM
1810{
1811 struct thread_info *tp;
1812 struct btrace_insn_iterator begin;
1813
1814 tp = require_btrace_thread ();
1815
1816 btrace_insn_begin (&begin, &tp->btrace);
1817 record_btrace_set_replay (tp, &begin);
1818
1819 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1820}
1821
1822/* The to_goto_record_end method of target record-btrace. */
1823
1824static void
307a1b91 1825record_btrace_goto_end (struct target_ops *ops)
066ce621
MM
1826{
1827 struct thread_info *tp;
1828
1829 tp = require_btrace_thread ();
1830
1831 record_btrace_set_replay (tp, NULL);
1832
1833 print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
1834}
1835
/* The to_goto_record method of target record-btrace.  Moves the replay
   position to the instruction numbered INSN.  */

static void
record_btrace_goto (struct target_ops *self, ULONGEST insn)
{
  struct thread_info *tp;
  struct btrace_insn_iterator it;
  unsigned int number;
  int found;

  /* INSN is truncated to unsigned int here ...  */
  number = insn;

  /* Check for wrap-arounds: reject values that do not round-trip.  */
  if (number != insn)
    error (_("Instruction number out of range."));

  tp = require_btrace_thread ();

  found = btrace_find_insn_by_number (&it, &tp->btrace, number);
  if (found == 0)
    error (_("No such instruction."));

  record_btrace_set_replay (tp, &it);

  /* Show where we ended up.  */
  print_stack_frame (get_selected_frame (NULL), 1, SRC_AND_LOC, 1);
}
1862
afedecd3
MM
1863/* Initialize the record-btrace target ops. */
1864
1865static void
1866init_record_btrace_ops (void)
1867{
1868 struct target_ops *ops;
1869
1870 ops = &record_btrace_ops;
1871 ops->to_shortname = "record-btrace";
1872 ops->to_longname = "Branch tracing target";
1873 ops->to_doc = "Collect control-flow trace and provide the execution history.";
1874 ops->to_open = record_btrace_open;
1875 ops->to_close = record_btrace_close;
1876 ops->to_detach = record_detach;
1877 ops->to_disconnect = record_disconnect;
1878 ops->to_mourn_inferior = record_mourn_inferior;
1879 ops->to_kill = record_kill;
1880 ops->to_create_inferior = find_default_create_inferior;
1881 ops->to_stop_recording = record_btrace_stop_recording;
1882 ops->to_info_record = record_btrace_info;
1883 ops->to_insn_history = record_btrace_insn_history;
1884 ops->to_insn_history_from = record_btrace_insn_history_from;
1885 ops->to_insn_history_range = record_btrace_insn_history_range;
1886 ops->to_call_history = record_btrace_call_history;
1887 ops->to_call_history_from = record_btrace_call_history_from;
1888 ops->to_call_history_range = record_btrace_call_history_range;
07bbe694 1889 ops->to_record_is_replaying = record_btrace_is_replaying;
633785ff
MM
1890 ops->to_xfer_partial = record_btrace_xfer_partial;
1891 ops->to_remove_breakpoint = record_btrace_remove_breakpoint;
1892 ops->to_insert_breakpoint = record_btrace_insert_breakpoint;
1f3ef581
MM
1893 ops->to_fetch_registers = record_btrace_fetch_registers;
1894 ops->to_store_registers = record_btrace_store_registers;
1895 ops->to_prepare_to_store = record_btrace_prepare_to_store;
cecac1ab 1896 ops->to_get_unwinder = &record_btrace_frame_unwind;
0b722aec 1897 ops->to_get_tailcall_unwinder = &record_btrace_tailcall_frame_unwind;
b2f4cfde
MM
1898 ops->to_resume = record_btrace_resume;
1899 ops->to_wait = record_btrace_wait;
e2887aa3
MM
1900 ops->to_find_new_threads = record_btrace_find_new_threads;
1901 ops->to_thread_alive = record_btrace_thread_alive;
066ce621
MM
1902 ops->to_goto_record_begin = record_btrace_goto_begin;
1903 ops->to_goto_record_end = record_btrace_goto_end;
1904 ops->to_goto_record = record_btrace_goto;
52834460
MM
1905 ops->to_can_execute_reverse = record_btrace_can_execute_reverse;
1906 ops->to_decr_pc_after_break = record_btrace_decr_pc_after_break;
afedecd3
MM
1907 ops->to_stratum = record_stratum;
1908 ops->to_magic = OPS_MAGIC;
1909}
1910
1911/* Alias for "target record". */
1912
1913static void
1914cmd_record_btrace_start (char *args, int from_tty)
1915{
1916 if (args != NULL && *args != 0)
1917 error (_("Invalid argument."));
1918
1919 execute_command ("target record-btrace", from_tty);
1920}
1921
/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_record_btrace (void);

/* Initialize btrace commands.  */

void
_initialize_record_btrace (void)
{
  add_cmd ("btrace", class_obscure, cmd_record_btrace_start,
	   _("Start branch trace recording."),
	   &record_cmdlist);
  add_alias_cmd ("b", "btrace", class_obscure, 1, &record_cmdlist);

  init_record_btrace_ops ();
  add_target (&record_btrace_ops);

  /* Create the btrace frame cache; entries are added by the frame sniffers
     and removed again in record_btrace_frame_dealloc_cache.  */
  bfcache = htab_create_alloc (50, bfcache_hash, bfcache_eq, NULL,
			       xcalloc, xfree);
}