/* Branch trace support for GDB, the GNU debugger.

   Copyright (C) 2013-2015 Free Software Foundation, Inc.

   Contributed by Intel Corp. <markus.t.metzger@intel.com>

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "btrace.h"
#include "gdbthread.h"
#include "inferior.h"
#include "target.h"
#include "record.h"
#include "symtab.h"
#include "disasm.h"
#include "source.h"
#include "filenames.h"
#include "xml-support.h"
#include "regcache.h"

/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...) \
  do \
    { \
      if (record_debug != 0) \
        fprintf_unfiltered (gdb_stdlog, \
                            "[btrace] " msg "\n", ##args); \
    } \
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)

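/* Example (illustrative): with "set debug record 1" in effect,

     DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

   prints a line like "[btrace] enable thread 1 (process 1234)" to
   gdb_stdlog; the thread and process shown here are made up.  */
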
/* Return the function name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_function_name (const struct btrace_function *bfun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  if (sym != NULL)
    return SYMBOL_PRINT_NAME (sym);

  if (msym != NULL)
    return MSYMBOL_PRINT_NAME (msym);

  return "<unknown>";
}

/* Return the file name of a recorded function segment for printing.
   This function never returns NULL.  */

static const char *
ftrace_print_filename (const struct btrace_function *bfun)
{
  struct symbol *sym;
  const char *filename;

  sym = bfun->sym;

  if (sym != NULL)
    filename = symtab_to_filename_for_display (symbol_symtab (sym));
  else
    filename = "<unknown>";

  return filename;
}

/* Return a string representation of the address of an instruction.
   This function never returns NULL.  */

static const char *
ftrace_print_insn_addr (const struct btrace_insn *insn)
{
  if (insn == NULL)
    return "<nil>";

  return core_addr_to_string_nz (insn->pc);
}

/* Print an ftrace debug status message.  */

static void
ftrace_debug (const struct btrace_function *bfun, const char *prefix)
{
  const char *fun, *file;
  unsigned int ibegin, iend;
  int level;

  fun = ftrace_print_function_name (bfun);
  file = ftrace_print_filename (bfun);
  level = bfun->level;

  ibegin = bfun->insn_offset;
  iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);

  DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
                prefix, fun, file, level, ibegin, iend);
}

/* Return non-zero if BFUN does not match MFUN and FUN,
   return zero otherwise.  */

static int
ftrace_function_switched (const struct btrace_function *bfun,
                          const struct minimal_symbol *mfun,
                          const struct symbol *fun)
{
  struct minimal_symbol *msym;
  struct symbol *sym;

  msym = bfun->msym;
  sym = bfun->sym;

  /* If the minimal symbol changed, we certainly switched functions.  */
  if (mfun != NULL && msym != NULL
      && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
    return 1;

  /* If the symbol changed, we certainly switched functions.  */
  if (fun != NULL && sym != NULL)
    {
      const char *bfname, *fname;

      /* Check the function name.  */
      if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
        return 1;

      /* Check the location of those functions, as well.  */
      bfname = symtab_to_fullname (symbol_symtab (sym));
      fname = symtab_to_fullname (symbol_symtab (fun));
      if (filename_cmp (fname, bfname) != 0)
        return 1;
    }

  /* If we lost symbol information, we switched functions.  */
  if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
    return 1;

  /* If we gained symbol information, we switched functions.  */
  if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
    return 1;

  return 0;
}

/* Allocate and initialize a new branch trace function segment.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_function (struct btrace_function *prev,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = xzalloc (sizeof (*bfun));

  bfun->msym = mfun;
  bfun->sym = fun;
  bfun->flow.prev = prev;

  if (prev == NULL)
    {
      /* Start counting at one.  */
      bfun->number = 1;
      bfun->insn_offset = 1;
    }
  else
    {
      gdb_assert (prev->flow.next == NULL);
      prev->flow.next = bfun;

      bfun->number = prev->number + 1;
      bfun->insn_offset = (prev->insn_offset
                           + VEC_length (btrace_insn_s, prev->insn));
      bfun->level = prev->level;
    }

  return bfun;
}

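/* For illustration: a trace of two segments with three instructions each
   gets numbered

     segment #1: number = 1, insn_offset = 1
     segment #2: number = 2, insn_offset = 4

   i.e. instruction numbers are global across segments and start at one.  */
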
/* Update the UP field of a function segment.  */

static void
ftrace_update_caller (struct btrace_function *bfun,
                      struct btrace_function *caller,
                      enum btrace_function_flag flags)
{
  if (bfun->up != NULL)
    ftrace_debug (bfun, "updating caller");

  bfun->up = caller;
  bfun->flags = flags;

  ftrace_debug (bfun, "set caller");
}

/* Fix up the caller for all segments of a function.  */

static void
ftrace_fixup_caller (struct btrace_function *bfun,
                     struct btrace_function *caller,
                     enum btrace_function_flag flags)
{
  struct btrace_function *prev, *next;

  ftrace_update_caller (bfun, caller, flags);

  /* Update all function segments belonging to the same function.  */
  for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
    ftrace_update_caller (prev, caller, flags);

  for (next = bfun->segment.next; next != NULL; next = next->segment.next)
    ftrace_update_caller (next, caller, flags);
}

/* Add a new function segment for a call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_call (struct btrace_function *caller,
                 struct minimal_symbol *mfun,
                 struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;

  ftrace_debug (bfun, "new call");

  return bfun;
}

/* Add a new function segment for a tail call.
   CALLER is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_tailcall (struct btrace_function *caller,
                     struct minimal_symbol *mfun,
                     struct symbol *fun)
{
  struct btrace_function *bfun;

  bfun = ftrace_new_function (caller, mfun, fun);
  bfun->up = caller;
  bfun->level += 1;
  bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;

  ftrace_debug (bfun, "new tail call");

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
   symbol information.  */

static struct btrace_function *
ftrace_find_caller (struct btrace_function *bfun,
                    struct minimal_symbol *mfun,
                    struct symbol *fun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      /* Skip functions with incompatible symbol information.  */
      if (ftrace_function_switched (bfun, mfun, fun))
        continue;

      /* This is the function segment we're looking for.  */
      break;
    }

  return bfun;
}

/* Find the innermost caller in the back trace of BFUN, skipping all
   function segments that do not end with a call instruction (e.g.
   tail calls ending with a jump).  */

static struct btrace_function *
ftrace_find_call (struct btrace_function *bfun)
{
  for (; bfun != NULL; bfun = bfun->up)
    {
      struct btrace_insn *last;

      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      last = VEC_last (btrace_insn_s, bfun->insn);

      if (last->iclass == BTRACE_INSN_CALL)
        break;
    }

  return bfun;
}

/* Add a continuation segment for a function into which we return.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_return (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun, *caller;

  bfun = ftrace_new_function (prev, mfun, fun);

  /* It is important to start at PREV's caller.  Otherwise, we might find
     PREV itself, if PREV is a recursive function.  */
  caller = ftrace_find_caller (prev->up, mfun, fun);
  if (caller != NULL)
    {
      /* The caller of PREV is the preceding btrace function segment in this
         function instance.  */
      gdb_assert (caller->segment.next == NULL);

      caller->segment.next = bfun;
      bfun->segment.prev = caller;

      /* Maintain the function level.  */
      bfun->level = caller->level;

      /* Maintain the call stack.  */
      bfun->up = caller->up;
      bfun->flags = caller->flags;

      ftrace_debug (bfun, "new return");
    }
  else
    {
      /* We did not find a caller.  This could mean that something went
         wrong or that the call is simply not included in the trace.  */

      /* Let's search for some actual call.  */
      caller = ftrace_find_call (prev->up);
      if (caller == NULL)
        {
          /* There is no call in PREV's back trace.  We assume that the
             branch trace did not include it.  */

          /* Let's find the topmost call function - this skips tail calls.  */
          while (prev->up != NULL)
            prev = prev->up;

          /* We maintain levels for a series of returns for which we have
             not seen the calls.
             We start at the preceding function's level in case this has
             already been a return for which we have not seen the call.
             We start at level 0 otherwise, to handle tail calls correctly.  */
          bfun->level = min (0, prev->level) - 1;

          /* Fix up the call stack for PREV.  */
          ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);

          ftrace_debug (bfun, "new return - no caller");
        }
      else
        {
          /* There is a call in PREV's back trace to which we should have
             returned.  Let's remain at this level.  */
          bfun->level = prev->level;

          ftrace_debug (bfun, "new return - unknown caller");
        }
    }

  return bfun;
}

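/* A sketch of the no-caller case above (hypothetical trace): if tracing
   starts inside FOO and we then see FOO return into BAR, BAR's call is not
   part of the trace.  BAR gets a level below FOO's (at most -1), and FOO's
   call stack is fixed up to link to BAR via BFUN_UP_LINKS_TO_RET so the
   back trace remains usable.  */
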
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
                   struct minimal_symbol *mfun,
                   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}

/* Add a new function segment for a gap in the trace due to a decode error.
   PREV is the chronologically preceding function segment.
   ERRCODE is the format-specific error code.  */

static struct btrace_function *
ftrace_new_gap (struct btrace_function *prev, int errcode)
{
  struct btrace_function *bfun;

  /* We hijack PREV if it was empty.  */
  if (prev != NULL && prev->errcode == 0
      && VEC_empty (btrace_insn_s, prev->insn))
    bfun = prev;
  else
    bfun = ftrace_new_function (prev, NULL, NULL);

  bfun->errcode = errcode;

  ftrace_debug (bfun, "new gap");

  return bfun;
}

/* Update BFUN with respect to the instruction at PC.  This may create new
   function segments.
   Return the chronologically latest function segment, never NULL.  */

static struct btrace_function *
ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
{
  struct bound_minimal_symbol bmfun;
  struct minimal_symbol *mfun;
  struct symbol *fun;
  struct btrace_insn *last;

  /* Try to determine the function we're in.  We use both types of symbols
     to avoid surprises when we sometimes get a full symbol and sometimes
     only a minimal symbol.  */
  fun = find_pc_function (pc);
  bmfun = lookup_minimal_symbol_by_pc (pc);
  mfun = bmfun.minsym;

  if (fun == NULL && mfun == NULL)
    DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));

  /* If we didn't have a function or if we had a gap before, we create one.  */
  if (bfun == NULL || bfun->errcode != 0)
    return ftrace_new_function (bfun, mfun, fun);

  /* Check the last instruction, if we have one.
     We do this check first, since it allows us to fill in the call stack
     links in addition to the normal flow links.  */
  last = NULL;
  if (!VEC_empty (btrace_insn_s, bfun->insn))
    last = VEC_last (btrace_insn_s, bfun->insn);

  if (last != NULL)
    {
      switch (last->iclass)
        {
        case BTRACE_INSN_RETURN:
          {
            const char *fname;

            /* On some systems, _dl_runtime_resolve returns to the resolved
               function instead of jumping to it.  From our perspective,
               however, this is a tailcall.
               If we treated it as return, we wouldn't be able to find the
               resolved function in our stack back trace.  Hence, we would
               lose the current stack back trace and start anew with an empty
               back trace.  When the resolved function returns, we would then
               create a stack back trace with the same function names but
               different frame id's.  This will confuse stepping.  */
            fname = ftrace_print_function_name (bfun);
            if (strcmp (fname, "_dl_runtime_resolve") == 0)
              return ftrace_new_tailcall (bfun, mfun, fun);

            return ftrace_new_return (bfun, mfun, fun);
          }

        case BTRACE_INSN_CALL:
          /* Ignore calls to the next instruction.  They are used for PIC.  */
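          /* For example (x86, illustrative), position-independent code may
             load the current PC by calling the next instruction:

               call 1f
            1: pop %ebx

             Such a call does not enter a new function.  */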
          if (last->pc + last->size == pc)
            break;

          return ftrace_new_call (bfun, mfun, fun);

        case BTRACE_INSN_JUMP:
          {
            CORE_ADDR start;

            start = get_pc_function_start (pc);

            /* If we can't determine the function for PC, we treat a jump at
               the end of the block as tail call.  */
            if (start == 0 || start == pc)
              return ftrace_new_tailcall (bfun, mfun, fun);
          }
        }
    }

  /* Check if we're switching functions for some other reason.  */
  if (ftrace_function_switched (bfun, mfun, fun))
    {
      DEBUG_FTRACE ("switching from %s in %s at %s",
                    ftrace_print_insn_addr (last),
                    ftrace_print_function_name (bfun),
                    ftrace_print_filename (bfun));

      return ftrace_new_switch (bfun, mfun, fun);
    }

  return bfun;
}

/* Add the instruction INSN to BFUN's instructions.  */

static void
ftrace_update_insns (struct btrace_function *bfun,
                     const struct btrace_insn *insn)
{
  VEC_safe_push (btrace_insn_s, bfun->insn, insn);

  if (record_debug > 1)
    ftrace_debug (bfun, "update insn");
}

/* Classify the instruction at PC.  */

static enum btrace_insn_class
ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
{
  volatile struct gdb_exception error;
  enum btrace_insn_class iclass;

  iclass = BTRACE_INSN_OTHER;
  TRY_CATCH (error, RETURN_MASK_ERROR)
    {
      if (gdbarch_insn_is_call (gdbarch, pc))
        iclass = BTRACE_INSN_CALL;
      else if (gdbarch_insn_is_ret (gdbarch, pc))
        iclass = BTRACE_INSN_RETURN;
      else if (gdbarch_insn_is_jump (gdbarch, pc))
        iclass = BTRACE_INSN_JUMP;
    }

  return iclass;
}

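/* A note on the input consumed below: a BTS block describes a range of
   linearly executed instructions from BLOCK->BEGIN to BLOCK->END, both
   inclusive, and the blocks vector is ordered most recent block first.  */
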
/* Compute the function branch trace from BTS trace.  */

static void
btrace_compute_ftrace_bts (struct thread_info *tp,
                           const struct btrace_data_bts *btrace)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *begin, *end;
  struct gdbarch *gdbarch;
  unsigned int blk, ngaps;
  int level;

  gdbarch = target_gdbarch ();
  btinfo = &tp->btrace;
  begin = btinfo->begin;
  end = btinfo->end;
  ngaps = btinfo->ngaps;
  level = begin != NULL ? -btinfo->level : INT_MAX;
  blk = VEC_length (btrace_block_s, btrace->blocks);

  while (blk != 0)
    {
      btrace_block_s *block;
      CORE_ADDR pc;

      blk -= 1;

      block = VEC_index (btrace_block_s, btrace->blocks, blk);
      pc = block->begin;

      for (;;)
        {
          volatile struct gdb_exception error;
          struct btrace_insn insn;
          int size;

          /* We should hit the end of the block.  Warn if we went too far.  */
          if (block->end < pc)
            {
              /* Indicate the gap in the trace - unless we're at the
                 beginning.  */
              if (begin != NULL)
                {
                  warning (_("Recorded trace may be corrupted around %s."),
                           core_addr_to_string_nz (pc));

                  end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
                  ngaps += 1;
                }
              break;
            }

          end = ftrace_update_function (end, pc);
          if (begin == NULL)
            begin = end;

          /* Maintain the function level offset.
             For all but the last block, we do it here.  */
          if (blk != 0)
            level = min (level, end->level);

          size = 0;
          TRY_CATCH (error, RETURN_MASK_ERROR)
            size = gdb_insn_length (gdbarch, pc);

          insn.pc = pc;
          insn.size = size;
          insn.iclass = ftrace_classify_insn (gdbarch, pc);

          ftrace_update_insns (end, &insn);

          /* We're done once we pushed the instruction at the end.  */
          if (block->end == pc)
            break;

          /* We can't continue if we fail to compute the size.  */
          if (size <= 0)
            {
              warning (_("Recorded trace may be incomplete around %s."),
                       core_addr_to_string_nz (pc));

              /* Indicate the gap in the trace.  We just added INSN so we're
                 not at the beginning.  */
              end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
              ngaps += 1;

              break;
            }

          pc += size;

          /* Maintain the function level offset.
             For the last block, we do it here to not consider the last
             instruction.
             Since the last instruction corresponds to the current instruction
             and is not really part of the execution history, it shouldn't
             affect the level.  */
          if (blk == 0)
            level = min (level, end->level);
        }
    }

  btinfo->begin = begin;
  btinfo->end = end;
  btinfo->ngaps = ngaps;

  /* LEVEL is the minimal function level of all btrace function segments.
     Define the global level offset to -LEVEL so all function levels are
     normalized to start at zero.  */
  btinfo->level = -level;
}

/* Compute the function branch trace from the block branch trace BTRACE
   for thread TP.  */

static void
btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
{
  DEBUG ("compute ftrace");

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return;

    case BTRACE_FORMAT_BTS:
      btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
      return;
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Add an entry for the current PC.  */

static void
btrace_add_pc (struct thread_info *tp)
{
  struct btrace_data btrace;
  struct btrace_block *block;
  struct regcache *regcache;
  struct cleanup *cleanup;
  CORE_ADDR pc;

  regcache = get_thread_regcache (tp->ptid);
  pc = regcache_read_pc (regcache);

  btrace_data_init (&btrace);
  btrace.format = BTRACE_FORMAT_BTS;
  btrace.variant.bts.blocks = NULL;

  cleanup = make_cleanup_btrace_data (&btrace);

  block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
  block->begin = pc;
  block->end = pc;

  btrace_compute_ftrace (tp, &btrace);

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
{
  if (tp->btrace.target != NULL)
    return;

  if (!target_supports_btrace (conf->format))
    error (_("Target does not support branch tracing."));

  DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  tp->btrace.target = target_enable_btrace (tp->ptid, conf);

  /* Add an entry for the current PC so we start tracing from where we
     enabled it.  */
  if (tp->btrace.target != NULL)
    btrace_add_pc (tp);
}

/* See btrace.h.  */

const struct btrace_config *
btrace_conf (const struct btrace_thread_info *btinfo)
{
  if (btinfo->target == NULL)
    return NULL;

  return target_btrace_conf (btinfo->target);
}

/* See btrace.h.  */

void
btrace_disable (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_disable_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* See btrace.h.  */

void
btrace_teardown (struct thread_info *tp)
{
  struct btrace_thread_info *btp = &tp->btrace;

  if (btp->target == NULL)
    return;

  DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  target_teardown_btrace (btp->target);
  btp->target = NULL;

  btrace_clear (tp);
}

/* Stitch branch trace in BTS format.  */

static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
         core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      warning (_("Error while trying to read delta trace.  Falling back to "
                 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
         ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}

/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise.  */

static int
btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
{
  /* If we don't have trace, there's nothing to do.  */
  if (btrace_data_empty (btrace))
    return 0;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_NONE:
      return 0;

    case BTRACE_FORMAT_BTS:
      return btrace_stitch_bts (&btrace->variant.bts, tp);
    }

  internal_error (__FILE__, __LINE__, _("Unknown branch trace format."));
}

/* Clear the branch trace histories in BTINFO.  */

static void
btrace_clear_history (struct btrace_thread_info *btinfo)
{
  xfree (btinfo->insn_history);
  xfree (btinfo->call_history);
  xfree (btinfo->replay);

  btinfo->insn_history = NULL;
  btinfo->call_history = NULL;
  btinfo->replay = NULL;
}

/* See btrace.h.  */

void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
        {
          /* Success.  Let's try to stitch the traces together.  */
          errcode = btrace_stitch_trace (&btrace, tp);
        }
      else
        {
          /* We failed to read delta trace.  Let's try to read new trace.  */
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

          /* If we got any new trace, discard what we have.  */
          if (errcode == 0 && !btrace_data_empty (&btrace))
            btrace_clear (tp);
        }

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
        {
          btrace_clear (tp);
          errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
        }
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}

/* See btrace.h.  */

void
btrace_clear (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *it, *trash;

  DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  /* Make sure btrace frames that may hold a pointer into the branch
     trace data are destroyed.  */
  reinit_frame_cache ();

  btinfo = &tp->btrace;

  it = btinfo->begin;
  while (it != NULL)
    {
      trash = it;
      it = it->flow.next;

      xfree (trash);
    }

  btinfo->begin = NULL;
  btinfo->end = NULL;
  btinfo->ngaps = 0;

  btrace_clear_history (btinfo);
}

/* See btrace.h.  */

void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}

#if defined (HAVE_LIBEXPAT)

/* Check the btrace document version.  */

static void
check_xml_btrace_version (struct gdb_xml_parser *parser,
                          const struct gdb_xml_element *element,
                          void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  const char *version = xml_find_attribute (attributes, "version")->value;

  if (strcmp (version, "1.0") != 0)
    gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
}

/* Parse a btrace "block" xml record.  */

static void
parse_xml_btrace_block (struct gdb_xml_parser *parser,
                        const struct gdb_xml_element *element,
                        void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_data *btrace;
  struct btrace_block *block;
  ULONGEST *begin, *end;

  btrace = user_data;

  switch (btrace->format)
    {
    case BTRACE_FORMAT_BTS:
      break;

    case BTRACE_FORMAT_NONE:
      btrace->format = BTRACE_FORMAT_BTS;
      btrace->variant.bts.blocks = NULL;
      break;

    default:
      gdb_xml_error (parser, _("Btrace format error."));
    }

  begin = xml_find_attribute (attributes, "begin")->value;
  end = xml_find_attribute (attributes, "end")->value;

  block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
  block->begin = *begin;
  block->end = *end;
}

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

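/* An example document accepted by the parser below (the addresses are
   made up):

     <btrace version="1.0">
       <block begin="0x4004e9" end="0x4004f1"/>
     </btrace>

   Each block element appends one block to the BTS block vector.  */
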
static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
                                 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

#if defined (HAVE_LIBEXPAT)

/* Parse a btrace-conf "bts" xml record.  */

static void
parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
                           const struct gdb_xml_element *element,
                           void *user_data, VEC (gdb_xml_value_s) *attributes)
{
  struct btrace_config *conf;
  struct gdb_xml_value *size;

  conf = user_data;
  conf->format = BTRACE_FORMAT_BTS;
  conf->bts.size = 0;

  size = xml_find_attribute (attributes, "size");
  if (size != NULL)
    conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
}

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

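/* An example configuration document (the size value is made up):

     <btrace-conf version="1.0">
       <bts size="65536"/>
     </btrace-conf>

   Both the bts element and its size attribute are optional.  */
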
static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

#endif /* defined (HAVE_LIBEXPAT) */

/* See btrace.h.  */

void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
                                 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}

/* See btrace.h.  */

const struct btrace_insn *
btrace_insn_get (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;
  unsigned int index, end;

  index = it->index;
  bfun = it->function;

  /* Check if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return NULL;

  /* The index is within the bounds of this function's instruction vector.  */
  end = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (0 < end);
  gdb_assert (index < end);

  return VEC_index (btrace_insn_s, bfun->insn, index);
}

/* See btrace.h.  */

unsigned int
btrace_insn_number (const struct btrace_insn_iterator *it)
{
  const struct btrace_function *bfun;

  bfun = it->function;

  /* Return zero if the iterator points to a gap in the trace.  */
  if (bfun->errcode != 0)
    return 0;

  return bfun->insn_offset + it->index;
}

/* See btrace.h.  */

void
btrace_insn_begin (struct btrace_insn_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->function = bfun;
  it->index = 0;
}

/* See btrace.h.  */

void
btrace_insn_end (struct btrace_insn_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;
  unsigned int length;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  length = VEC_length (btrace_insn_s, bfun->insn);

  /* The last function may either be a gap or it contains the current
     instruction, which is one past the end of the execution trace; ignore
     it.  */
  if (length > 0)
    length -= 1;

  it->function = bfun;
  it->index = length;
}

/* See btrace.h.  */

unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
         it as one instruction.  */
      if (end == 0)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            break;

          stride -= 1;
          steps += 1;

          bfun = next;
          index = 0;

          continue;
        }

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
        {
          const struct btrace_function *next;

          next = bfun->flow.next;
          if (next == NULL)
            {
              /* We stepped past the last function.

                 Let's adjust the index to point to the last instruction in
                 the previous function.  */
              index -= 1;
              steps -= 1;
              break;
            }

          /* We now point to the first instruction in the new function.  */
          bfun = next;
          index = 0;
        }

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
        {
          const struct btrace_function *prev;

          prev = bfun->flow.prev;
          if (prev == NULL)
            break;

          /* We point to one after the last instruction in the new
             function.  */
          bfun = prev;
          index = VEC_length (btrace_insn_s, bfun->insn);

          /* An empty function segment represents a gap in the trace.  We
             count it as one instruction.  */
          if (index == 0)
            {
              stride -= 1;
              steps += 1;

              continue;
            }
        }

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  */
  it->function = bfun;
  it->index = index;

  return steps;
}

/* See btrace.h.  */

int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
                 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */
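  /* For example (illustrative): consider a gap whose function segment has
     insn_offset 10.  Comparing the gap against the instruction numbered 10,
     the gap's number is taken as 10 and then biased to 9, so the gap orders
     strictly before the first instruction that follows it.  */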

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      if (lnum == rnum)
        lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
        rnum -= 1;
    }

  return (int) (lnum - rnum);
}

/* See btrace.h.  */

int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps.  */
      if (bfun->errcode != 0)
        continue;

      if (bfun->insn_offset <= number)
        break;
    }

  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}

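/* A usage sketch for the instruction iterators above (illustrative; TP is
   assumed to be a thread that has branch trace):

     struct btrace_insn_iterator it;

     btrace_insn_begin (&it, &tp->btrace);
     do
       {
         const struct btrace_insn *insn = btrace_insn_get (&it);

         if (insn != NULL)
           ...
       }
     while (btrace_insn_next (&it, 1) != 0);

   btrace_insn_get returns NULL while the iterator points into a gap.  */
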
/* See btrace.h.  */

const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  return it->function;
}

/* See btrace.h.  */

unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}

/* See btrace.h.  */

void
btrace_call_begin (struct btrace_call_iterator *it,
                   const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->begin;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = bfun;
}

/* See btrace.h.  */

void
btrace_call_end (struct btrace_call_iterator *it,
                 const struct btrace_thread_info *btinfo)
{
  const struct btrace_function *bfun;

  bfun = btinfo->end;
  if (bfun == NULL)
    error (_("No trace."));

  it->btinfo = btinfo;
  it->function = NULL;
}

/* See btrace.h.  */

unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
        {
          /* Ignore the last function if it only contains a single
             (i.e. the current) instruction.  */
          insns = VEC_length (btrace_insn_s, bfun->insn);
          if (insns == 1)
            steps -= 1;
        }

      if (stride == steps)
        break;

      bfun = next;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h.  */

unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
        return 0;

      /* Ignore the last function if it only contains a single
         (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
        bfun = bfun->flow.prev;

      if (bfun == NULL)
        return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
        break;

      bfun = prev;
      steps += 1;
    }

  it->function = bfun;
  return steps;
}

/* See btrace.h.  */

int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
                 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_call_number (lhs);
  rnum = btrace_call_number (rhs);

  return (int) (lnum - rnum);
}

/* See btrace.h.  */

int
btrace_find_call_by_number (struct btrace_call_iterator *it,
                            const struct btrace_thread_info *btinfo,
                            unsigned int number)
{
  const struct btrace_function *bfun;

  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      unsigned int bnum;

      bnum = bfun->number;
      if (number == bnum)
        {
          it->btinfo = btinfo;
          it->function = bfun;
          return 1;
        }

      /* Functions are ordered and numbered consecutively.  We could bail out
         earlier.  On the other hand, it is very unlikely that we search for
         a nonexistent function.  */
    }

  return 0;
}

/* See btrace.h.  */

void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
                         const struct btrace_insn_iterator *begin,
                         const struct btrace_insn_iterator *end)
{
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}

/* See btrace.h.  */

void
btrace_set_call_history (struct btrace_thread_info *btinfo,
                         const struct btrace_call_iterator *begin,
                         const struct btrace_call_iterator *end)
{
  gdb_assert (begin->btinfo == end->btinfo);

  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}

/* See btrace.h.  */

int
btrace_is_replaying (struct thread_info *tp)
{
  return tp->btrace.replay != NULL;
}

/* See btrace.h.  */

int
btrace_is_empty (struct thread_info *tp)
{
  struct btrace_insn_iterator begin, end;
  struct btrace_thread_info *btinfo;

  btinfo = &tp->btrace;

  if (btinfo->begin == NULL)
    return 1;

  btrace_insn_begin (&begin, btinfo);
  btrace_insn_end (&end, btinfo);

  return btrace_insn_cmp (&begin, &end) == 0;
}

/* Forward the cleanup request.  */

static void
do_btrace_data_cleanup (void *arg)
{
  btrace_data_fini (arg);
}

/* See btrace.h.  */

struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  return make_cleanup (do_btrace_data_cleanup, data);
}
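
/* A usage sketch for the cleanup above (this mirrors btrace_fetch):

     struct btrace_data btrace;
     struct cleanup *cleanup;

     btrace_data_init (&btrace);
     cleanup = make_cleanup_btrace_data (&btrace);
     ...
     do_cleanups (cleanup);

   Running the cleanup releases the trace data via btrace_data_fini.  */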