]> git.ipfire.org Git - thirdparty/binutils-gdb.git/blame - gdb/btrace.c
btrace, linux: use data_size and data_offset
[thirdparty/binutils-gdb.git] / gdb / btrace.c
CommitLineData
02d27625
MM
1/* Branch trace support for GDB, the GNU debugger.
2
32d0add0 3 Copyright (C) 2013-2015 Free Software Foundation, Inc.
02d27625
MM
4
5 Contributed by Intel Corp. <markus.t.metzger@intel.com>
6
7 This file is part of GDB.
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 3 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program. If not, see <http://www.gnu.org/licenses/>. */
21
d41f6d8e 22#include "defs.h"
02d27625
MM
23#include "btrace.h"
24#include "gdbthread.h"
02d27625
MM
25#include "inferior.h"
26#include "target.h"
27#include "record.h"
28#include "symtab.h"
29#include "disasm.h"
30#include "source.h"
31#include "filenames.h"
c12a2917 32#include "xml-support.h"
6e07b1d2 33#include "regcache.h"
b20a6524
MM
34#include "rsp-low.h"
35
36#include <inttypes.h>
37
38static void btrace_add_pc (struct thread_info *tp);
02d27625
MM
39
/* Print a record debug message.  Use do ... while (0) to avoid ambiguities
   when used in if statements.  */

#define DEBUG(msg, args...)						\
  do									\
    {									\
      if (record_debug != 0)						\
	fprintf_unfiltered (gdb_stdlog,					\
			    "[btrace] " msg "\n", ##args);		\
    }									\
  while (0)

#define DEBUG_FTRACE(msg, args...) DEBUG ("[ftrace] " msg, ##args)
53
02d27625
MM
54/* Return the function name of a recorded function segment for printing.
55 This function never returns NULL. */
56
57static const char *
23a7fe75 58ftrace_print_function_name (const struct btrace_function *bfun)
02d27625
MM
59{
60 struct minimal_symbol *msym;
61 struct symbol *sym;
62
63 msym = bfun->msym;
64 sym = bfun->sym;
65
66 if (sym != NULL)
67 return SYMBOL_PRINT_NAME (sym);
68
69 if (msym != NULL)
efd66ac6 70 return MSYMBOL_PRINT_NAME (msym);
02d27625
MM
71
72 return "<unknown>";
73}
74
75/* Return the file name of a recorded function segment for printing.
76 This function never returns NULL. */
77
78static const char *
23a7fe75 79ftrace_print_filename (const struct btrace_function *bfun)
02d27625
MM
80{
81 struct symbol *sym;
82 const char *filename;
83
84 sym = bfun->sym;
85
86 if (sym != NULL)
08be3fe3 87 filename = symtab_to_filename_for_display (symbol_symtab (sym));
02d27625
MM
88 else
89 filename = "<unknown>";
90
91 return filename;
92}
93
23a7fe75
MM
94/* Return a string representation of the address of an instruction.
95 This function never returns NULL. */
02d27625 96
23a7fe75
MM
97static const char *
98ftrace_print_insn_addr (const struct btrace_insn *insn)
02d27625 99{
23a7fe75
MM
100 if (insn == NULL)
101 return "<nil>";
102
103 return core_addr_to_string_nz (insn->pc);
02d27625
MM
104}
105
23a7fe75 106/* Print an ftrace debug status message. */
02d27625
MM
107
108static void
23a7fe75 109ftrace_debug (const struct btrace_function *bfun, const char *prefix)
02d27625 110{
23a7fe75
MM
111 const char *fun, *file;
112 unsigned int ibegin, iend;
ce0dfbea 113 int level;
23a7fe75
MM
114
115 fun = ftrace_print_function_name (bfun);
116 file = ftrace_print_filename (bfun);
117 level = bfun->level;
118
23a7fe75
MM
119 ibegin = bfun->insn_offset;
120 iend = ibegin + VEC_length (btrace_insn_s, bfun->insn);
121
ce0dfbea
MM
122 DEBUG_FTRACE ("%s: fun = %s, file = %s, level = %d, insn = [%u; %u)",
123 prefix, fun, file, level, ibegin, iend);
02d27625
MM
124}
125
23a7fe75
MM
126/* Return non-zero if BFUN does not match MFUN and FUN,
127 return zero otherwise. */
02d27625
MM
128
129static int
23a7fe75
MM
130ftrace_function_switched (const struct btrace_function *bfun,
131 const struct minimal_symbol *mfun,
132 const struct symbol *fun)
02d27625
MM
133{
134 struct minimal_symbol *msym;
135 struct symbol *sym;
136
02d27625
MM
137 msym = bfun->msym;
138 sym = bfun->sym;
139
140 /* If the minimal symbol changed, we certainly switched functions. */
141 if (mfun != NULL && msym != NULL
efd66ac6 142 && strcmp (MSYMBOL_LINKAGE_NAME (mfun), MSYMBOL_LINKAGE_NAME (msym)) != 0)
02d27625
MM
143 return 1;
144
145 /* If the symbol changed, we certainly switched functions. */
146 if (fun != NULL && sym != NULL)
147 {
148 const char *bfname, *fname;
149
150 /* Check the function name. */
151 if (strcmp (SYMBOL_LINKAGE_NAME (fun), SYMBOL_LINKAGE_NAME (sym)) != 0)
152 return 1;
153
154 /* Check the location of those functions, as well. */
08be3fe3
DE
155 bfname = symtab_to_fullname (symbol_symtab (sym));
156 fname = symtab_to_fullname (symbol_symtab (fun));
02d27625
MM
157 if (filename_cmp (fname, bfname) != 0)
158 return 1;
159 }
160
23a7fe75
MM
161 /* If we lost symbol information, we switched functions. */
162 if (!(msym == NULL && sym == NULL) && mfun == NULL && fun == NULL)
163 return 1;
164
165 /* If we gained symbol information, we switched functions. */
166 if (msym == NULL && sym == NULL && !(mfun == NULL && fun == NULL))
167 return 1;
168
02d27625
MM
169 return 0;
170}
171
23a7fe75
MM
172/* Allocate and initialize a new branch trace function segment.
173 PREV is the chronologically preceding function segment.
174 MFUN and FUN are the symbol information we have for this function. */
175
176static struct btrace_function *
177ftrace_new_function (struct btrace_function *prev,
178 struct minimal_symbol *mfun,
179 struct symbol *fun)
180{
181 struct btrace_function *bfun;
182
183 bfun = xzalloc (sizeof (*bfun));
184
185 bfun->msym = mfun;
186 bfun->sym = fun;
187 bfun->flow.prev = prev;
188
5de9129b
MM
189 if (prev == NULL)
190 {
191 /* Start counting at one. */
192 bfun->number = 1;
193 bfun->insn_offset = 1;
194 }
195 else
23a7fe75
MM
196 {
197 gdb_assert (prev->flow.next == NULL);
198 prev->flow.next = bfun;
02d27625 199
23a7fe75
MM
200 bfun->number = prev->number + 1;
201 bfun->insn_offset = (prev->insn_offset
202 + VEC_length (btrace_insn_s, prev->insn));
31fd9caa 203 bfun->level = prev->level;
23a7fe75
MM
204 }
205
206 return bfun;
02d27625
MM
207}
208
23a7fe75 209/* Update the UP field of a function segment. */
02d27625 210
23a7fe75
MM
211static void
212ftrace_update_caller (struct btrace_function *bfun,
213 struct btrace_function *caller,
214 enum btrace_function_flag flags)
02d27625 215{
23a7fe75
MM
216 if (bfun->up != NULL)
217 ftrace_debug (bfun, "updating caller");
02d27625 218
23a7fe75
MM
219 bfun->up = caller;
220 bfun->flags = flags;
221
222 ftrace_debug (bfun, "set caller");
223}
224
225/* Fix up the caller for all segments of a function. */
226
227static void
228ftrace_fixup_caller (struct btrace_function *bfun,
229 struct btrace_function *caller,
230 enum btrace_function_flag flags)
231{
232 struct btrace_function *prev, *next;
233
234 ftrace_update_caller (bfun, caller, flags);
235
236 /* Update all function segments belonging to the same function. */
237 for (prev = bfun->segment.prev; prev != NULL; prev = prev->segment.prev)
238 ftrace_update_caller (prev, caller, flags);
239
240 for (next = bfun->segment.next; next != NULL; next = next->segment.next)
241 ftrace_update_caller (next, caller, flags);
242}
243
244/* Add a new function segment for a call.
245 CALLER is the chronologically preceding function segment.
246 MFUN and FUN are the symbol information we have for this function. */
247
248static struct btrace_function *
249ftrace_new_call (struct btrace_function *caller,
250 struct minimal_symbol *mfun,
251 struct symbol *fun)
252{
253 struct btrace_function *bfun;
254
255 bfun = ftrace_new_function (caller, mfun, fun);
256 bfun->up = caller;
31fd9caa 257 bfun->level += 1;
23a7fe75
MM
258
259 ftrace_debug (bfun, "new call");
260
261 return bfun;
262}
263
264/* Add a new function segment for a tail call.
265 CALLER is the chronologically preceding function segment.
266 MFUN and FUN are the symbol information we have for this function. */
267
268static struct btrace_function *
269ftrace_new_tailcall (struct btrace_function *caller,
270 struct minimal_symbol *mfun,
271 struct symbol *fun)
272{
273 struct btrace_function *bfun;
02d27625 274
23a7fe75
MM
275 bfun = ftrace_new_function (caller, mfun, fun);
276 bfun->up = caller;
31fd9caa 277 bfun->level += 1;
23a7fe75 278 bfun->flags |= BFUN_UP_LINKS_TO_TAILCALL;
02d27625 279
23a7fe75
MM
280 ftrace_debug (bfun, "new tail call");
281
282 return bfun;
283}
284
285/* Find the innermost caller in the back trace of BFUN with MFUN/FUN
286 symbol information. */
287
288static struct btrace_function *
289ftrace_find_caller (struct btrace_function *bfun,
290 struct minimal_symbol *mfun,
291 struct symbol *fun)
292{
293 for (; bfun != NULL; bfun = bfun->up)
294 {
295 /* Skip functions with incompatible symbol information. */
296 if (ftrace_function_switched (bfun, mfun, fun))
297 continue;
298
299 /* This is the function segment we're looking for. */
300 break;
301 }
302
303 return bfun;
304}
305
306/* Find the innermost caller in the back trace of BFUN, skipping all
307 function segments that do not end with a call instruction (e.g.
308 tail calls ending with a jump). */
309
310static struct btrace_function *
7d5c24b3 311ftrace_find_call (struct btrace_function *bfun)
23a7fe75
MM
312{
313 for (; bfun != NULL; bfun = bfun->up)
02d27625 314 {
23a7fe75 315 struct btrace_insn *last;
02d27625 316
31fd9caa
MM
317 /* Skip gaps. */
318 if (bfun->errcode != 0)
319 continue;
23a7fe75
MM
320
321 last = VEC_last (btrace_insn_s, bfun->insn);
02d27625 322
7d5c24b3 323 if (last->iclass == BTRACE_INSN_CALL)
23a7fe75
MM
324 break;
325 }
326
327 return bfun;
328}
329
330/* Add a continuation segment for a function into which we return.
331 PREV is the chronologically preceding function segment.
332 MFUN and FUN are the symbol information we have for this function. */
333
334static struct btrace_function *
7d5c24b3 335ftrace_new_return (struct btrace_function *prev,
23a7fe75
MM
336 struct minimal_symbol *mfun,
337 struct symbol *fun)
338{
339 struct btrace_function *bfun, *caller;
340
341 bfun = ftrace_new_function (prev, mfun, fun);
342
343 /* It is important to start at PREV's caller. Otherwise, we might find
344 PREV itself, if PREV is a recursive function. */
345 caller = ftrace_find_caller (prev->up, mfun, fun);
346 if (caller != NULL)
347 {
348 /* The caller of PREV is the preceding btrace function segment in this
349 function instance. */
350 gdb_assert (caller->segment.next == NULL);
351
352 caller->segment.next = bfun;
353 bfun->segment.prev = caller;
354
355 /* Maintain the function level. */
356 bfun->level = caller->level;
357
358 /* Maintain the call stack. */
359 bfun->up = caller->up;
360 bfun->flags = caller->flags;
361
362 ftrace_debug (bfun, "new return");
363 }
364 else
365 {
366 /* We did not find a caller. This could mean that something went
367 wrong or that the call is simply not included in the trace. */
02d27625 368
23a7fe75 369 /* Let's search for some actual call. */
7d5c24b3 370 caller = ftrace_find_call (prev->up);
23a7fe75 371 if (caller == NULL)
02d27625 372 {
23a7fe75
MM
373 /* There is no call in PREV's back trace. We assume that the
374 branch trace did not include it. */
375
376 /* Let's find the topmost call function - this skips tail calls. */
377 while (prev->up != NULL)
378 prev = prev->up;
02d27625 379
23a7fe75
MM
380 /* We maintain levels for a series of returns for which we have
381 not seen the calls.
382 We start at the preceding function's level in case this has
383 already been a return for which we have not seen the call.
384 We start at level 0 otherwise, to handle tail calls correctly. */
385 bfun->level = min (0, prev->level) - 1;
386
387 /* Fix up the call stack for PREV. */
388 ftrace_fixup_caller (prev, bfun, BFUN_UP_LINKS_TO_RET);
389
390 ftrace_debug (bfun, "new return - no caller");
391 }
392 else
02d27625 393 {
23a7fe75
MM
394 /* There is a call in PREV's back trace to which we should have
395 returned. Let's remain at this level. */
396 bfun->level = prev->level;
02d27625 397
23a7fe75 398 ftrace_debug (bfun, "new return - unknown caller");
02d27625 399 }
23a7fe75
MM
400 }
401
402 return bfun;
403}
404
/* Add a new function segment for a function switch.
   PREV is the chronologically preceding function segment.
   MFUN and FUN are the symbol information we have for this function.  */

static struct btrace_function *
ftrace_new_switch (struct btrace_function *prev,
		   struct minimal_symbol *mfun,
		   struct symbol *fun)
{
  struct btrace_function *bfun;

  /* This is an unexplained function switch.  The call stack will likely
     be wrong at this point.  */
  bfun = ftrace_new_function (prev, mfun, fun);

  ftrace_debug (bfun, "new switch");

  return bfun;
}
424
31fd9caa
MM
425/* Add a new function segment for a gap in the trace due to a decode error.
426 PREV is the chronologically preceding function segment.
427 ERRCODE is the format-specific error code. */
428
429static struct btrace_function *
430ftrace_new_gap (struct btrace_function *prev, int errcode)
431{
432 struct btrace_function *bfun;
433
434 /* We hijack prev if it was empty. */
435 if (prev != NULL && prev->errcode == 0
436 && VEC_empty (btrace_insn_s, prev->insn))
437 bfun = prev;
438 else
439 bfun = ftrace_new_function (prev, NULL, NULL);
440
441 bfun->errcode = errcode;
442
443 ftrace_debug (bfun, "new gap");
444
445 return bfun;
446}
447
23a7fe75
MM
448/* Update BFUN with respect to the instruction at PC. This may create new
449 function segments.
450 Return the chronologically latest function segment, never NULL. */
451
452static struct btrace_function *
7d5c24b3 453ftrace_update_function (struct btrace_function *bfun, CORE_ADDR pc)
23a7fe75
MM
454{
455 struct bound_minimal_symbol bmfun;
456 struct minimal_symbol *mfun;
457 struct symbol *fun;
458 struct btrace_insn *last;
459
460 /* Try to determine the function we're in. We use both types of symbols
461 to avoid surprises when we sometimes get a full symbol and sometimes
462 only a minimal symbol. */
463 fun = find_pc_function (pc);
464 bmfun = lookup_minimal_symbol_by_pc (pc);
465 mfun = bmfun.minsym;
466
467 if (fun == NULL && mfun == NULL)
468 DEBUG_FTRACE ("no symbol at %s", core_addr_to_string_nz (pc));
469
31fd9caa
MM
470 /* If we didn't have a function or if we had a gap before, we create one. */
471 if (bfun == NULL || bfun->errcode != 0)
23a7fe75
MM
472 return ftrace_new_function (bfun, mfun, fun);
473
474 /* Check the last instruction, if we have one.
475 We do this check first, since it allows us to fill in the call stack
476 links in addition to the normal flow links. */
477 last = NULL;
478 if (!VEC_empty (btrace_insn_s, bfun->insn))
479 last = VEC_last (btrace_insn_s, bfun->insn);
480
481 if (last != NULL)
482 {
7d5c24b3
MM
483 switch (last->iclass)
484 {
485 case BTRACE_INSN_RETURN:
986b6601
MM
486 {
487 const char *fname;
488
489 /* On some systems, _dl_runtime_resolve returns to the resolved
490 function instead of jumping to it. From our perspective,
491 however, this is a tailcall.
492 If we treated it as return, we wouldn't be able to find the
493 resolved function in our stack back trace. Hence, we would
494 lose the current stack back trace and start anew with an empty
495 back trace. When the resolved function returns, we would then
496 create a stack back trace with the same function names but
497 different frame id's. This will confuse stepping. */
498 fname = ftrace_print_function_name (bfun);
499 if (strcmp (fname, "_dl_runtime_resolve") == 0)
500 return ftrace_new_tailcall (bfun, mfun, fun);
501
502 return ftrace_new_return (bfun, mfun, fun);
503 }
23a7fe75 504
7d5c24b3
MM
505 case BTRACE_INSN_CALL:
506 /* Ignore calls to the next instruction. They are used for PIC. */
507 if (last->pc + last->size == pc)
508 break;
23a7fe75 509
7d5c24b3 510 return ftrace_new_call (bfun, mfun, fun);
23a7fe75 511
7d5c24b3
MM
512 case BTRACE_INSN_JUMP:
513 {
514 CORE_ADDR start;
23a7fe75 515
7d5c24b3 516 start = get_pc_function_start (pc);
23a7fe75 517
7d5c24b3
MM
518 /* If we can't determine the function for PC, we treat a jump at
519 the end of the block as tail call. */
520 if (start == 0 || start == pc)
521 return ftrace_new_tailcall (bfun, mfun, fun);
522 }
02d27625 523 }
23a7fe75
MM
524 }
525
526 /* Check if we're switching functions for some other reason. */
527 if (ftrace_function_switched (bfun, mfun, fun))
528 {
529 DEBUG_FTRACE ("switching from %s in %s at %s",
530 ftrace_print_insn_addr (last),
531 ftrace_print_function_name (bfun),
532 ftrace_print_filename (bfun));
02d27625 533
23a7fe75
MM
534 return ftrace_new_switch (bfun, mfun, fun);
535 }
536
537 return bfun;
538}
539
23a7fe75
MM
540/* Add the instruction at PC to BFUN's instructions. */
541
542static void
7d5c24b3
MM
543ftrace_update_insns (struct btrace_function *bfun,
544 const struct btrace_insn *insn)
23a7fe75 545{
7d5c24b3 546 VEC_safe_push (btrace_insn_s, bfun->insn, insn);
23a7fe75
MM
547
548 if (record_debug > 1)
549 ftrace_debug (bfun, "update insn");
550}
551
7d5c24b3
MM
552/* Classify the instruction at PC. */
553
554static enum btrace_insn_class
555ftrace_classify_insn (struct gdbarch *gdbarch, CORE_ADDR pc)
556{
7d5c24b3
MM
557 enum btrace_insn_class iclass;
558
559 iclass = BTRACE_INSN_OTHER;
492d29ea 560 TRY
7d5c24b3
MM
561 {
562 if (gdbarch_insn_is_call (gdbarch, pc))
563 iclass = BTRACE_INSN_CALL;
564 else if (gdbarch_insn_is_ret (gdbarch, pc))
565 iclass = BTRACE_INSN_RETURN;
566 else if (gdbarch_insn_is_jump (gdbarch, pc))
567 iclass = BTRACE_INSN_JUMP;
568 }
492d29ea
PA
569 CATCH (error, RETURN_MASK_ERROR)
570 {
571 }
572 END_CATCH
7d5c24b3
MM
573
574 return iclass;
575}
576
734b0e4b 577/* Compute the function branch trace from BTS trace. */
23a7fe75
MM
578
579static void
76235df1 580btrace_compute_ftrace_bts (struct thread_info *tp,
734b0e4b 581 const struct btrace_data_bts *btrace)
23a7fe75 582{
76235df1 583 struct btrace_thread_info *btinfo;
23a7fe75
MM
584 struct btrace_function *begin, *end;
585 struct gdbarch *gdbarch;
31fd9caa 586 unsigned int blk, ngaps;
23a7fe75
MM
587 int level;
588
23a7fe75 589 gdbarch = target_gdbarch ();
76235df1 590 btinfo = &tp->btrace;
969c39fb
MM
591 begin = btinfo->begin;
592 end = btinfo->end;
31fd9caa 593 ngaps = btinfo->ngaps;
969c39fb 594 level = begin != NULL ? -btinfo->level : INT_MAX;
734b0e4b 595 blk = VEC_length (btrace_block_s, btrace->blocks);
23a7fe75
MM
596
597 while (blk != 0)
598 {
599 btrace_block_s *block;
600 CORE_ADDR pc;
601
602 blk -= 1;
603
734b0e4b 604 block = VEC_index (btrace_block_s, btrace->blocks, blk);
23a7fe75
MM
605 pc = block->begin;
606
607 for (;;)
608 {
7d5c24b3 609 struct btrace_insn insn;
23a7fe75
MM
610 int size;
611
612 /* We should hit the end of the block. Warn if we went too far. */
613 if (block->end < pc)
614 {
31fd9caa
MM
615 /* Indicate the gap in the trace - unless we're at the
616 beginning. */
617 if (begin != NULL)
618 {
619 warning (_("Recorded trace may be corrupted around %s."),
620 core_addr_to_string_nz (pc));
621
622 end = ftrace_new_gap (end, BDE_BTS_OVERFLOW);
623 ngaps += 1;
624 }
23a7fe75
MM
625 break;
626 }
627
7d5c24b3 628 end = ftrace_update_function (end, pc);
23a7fe75
MM
629 if (begin == NULL)
630 begin = end;
631
8710b709
MM
632 /* Maintain the function level offset.
633 For all but the last block, we do it here. */
634 if (blk != 0)
635 level = min (level, end->level);
23a7fe75 636
7d5c24b3 637 size = 0;
492d29ea
PA
638 TRY
639 {
640 size = gdb_insn_length (gdbarch, pc);
641 }
642 CATCH (error, RETURN_MASK_ERROR)
643 {
644 }
645 END_CATCH
7d5c24b3
MM
646
647 insn.pc = pc;
648 insn.size = size;
649 insn.iclass = ftrace_classify_insn (gdbarch, pc);
650
651 ftrace_update_insns (end, &insn);
23a7fe75
MM
652
653 /* We're done once we pushed the instruction at the end. */
654 if (block->end == pc)
655 break;
656
7d5c24b3 657 /* We can't continue if we fail to compute the size. */
23a7fe75
MM
658 if (size <= 0)
659 {
660 warning (_("Recorded trace may be incomplete around %s."),
661 core_addr_to_string_nz (pc));
31fd9caa
MM
662
663 /* Indicate the gap in the trace. We just added INSN so we're
664 not at the beginning. */
665 end = ftrace_new_gap (end, BDE_BTS_INSN_SIZE);
666 ngaps += 1;
667
23a7fe75
MM
668 break;
669 }
670
671 pc += size;
8710b709
MM
672
673 /* Maintain the function level offset.
674 For the last block, we do it here to not consider the last
675 instruction.
676 Since the last instruction corresponds to the current instruction
677 and is not really part of the execution history, it shouldn't
678 affect the level. */
679 if (blk == 0)
680 level = min (level, end->level);
23a7fe75 681 }
02d27625
MM
682 }
683
23a7fe75
MM
684 btinfo->begin = begin;
685 btinfo->end = end;
31fd9caa 686 btinfo->ngaps = ngaps;
23a7fe75
MM
687
688 /* LEVEL is the minimal function level of all btrace function segments.
689 Define the global level offset to -LEVEL so all function levels are
690 normalized to start at zero. */
691 btinfo->level = -level;
02d27625
MM
692}
693
b20a6524
MM
694#if defined (HAVE_LIBIPT)
695
696static enum btrace_insn_class
697pt_reclassify_insn (enum pt_insn_class iclass)
698{
699 switch (iclass)
700 {
701 case ptic_call:
702 return BTRACE_INSN_CALL;
703
704 case ptic_return:
705 return BTRACE_INSN_RETURN;
706
707 case ptic_jump:
708 return BTRACE_INSN_JUMP;
709
710 default:
711 return BTRACE_INSN_OTHER;
712 }
713}
714
715/* Add function branch trace using DECODER. */
716
717static void
718ftrace_add_pt (struct pt_insn_decoder *decoder,
719 struct btrace_function **pbegin,
720 struct btrace_function **pend, int *plevel,
721 unsigned int *ngaps)
722{
723 struct btrace_function *begin, *end, *upd;
724 uint64_t offset;
725 int errcode, nerrors;
726
727 begin = *pbegin;
728 end = *pend;
729 nerrors = 0;
730 for (;;)
731 {
732 struct btrace_insn btinsn;
733 struct pt_insn insn;
734
735 errcode = pt_insn_sync_forward (decoder);
736 if (errcode < 0)
737 {
738 if (errcode != -pte_eos)
739 warning (_("Failed to synchronize onto the Intel(R) Processor "
740 "Trace stream: %s."), pt_errstr (pt_errcode (errcode)));
741 break;
742 }
743
744 memset (&btinsn, 0, sizeof (btinsn));
745 for (;;)
746 {
747 errcode = pt_insn_next (decoder, &insn, sizeof(insn));
748 if (errcode < 0)
749 break;
750
751 /* Look for gaps in the trace - unless we're at the beginning. */
752 if (begin != NULL)
753 {
754 /* Tracing is disabled and re-enabled each time we enter the
755 kernel. Most times, we continue from the same instruction we
756 stopped before. This is indicated via the RESUMED instruction
757 flag. The ENABLED instruction flag means that we continued
758 from some other instruction. Indicate this as a trace gap. */
759 if (insn.enabled)
760 *pend = end = ftrace_new_gap (end, BDE_PT_DISABLED);
761
762 /* Indicate trace overflows. */
763 if (insn.resynced)
764 *pend = end = ftrace_new_gap (end, BDE_PT_OVERFLOW);
765 }
766
767 upd = ftrace_update_function (end, insn.ip);
768 if (upd != end)
769 {
770 *pend = end = upd;
771
772 if (begin == NULL)
773 *pbegin = begin = upd;
774 }
775
776 /* Maintain the function level offset. */
777 *plevel = min (*plevel, end->level);
778
779 btinsn.pc = (CORE_ADDR) insn.ip;
780 btinsn.size = (gdb_byte) insn.size;
781 btinsn.iclass = pt_reclassify_insn (insn.iclass);
782
783 ftrace_update_insns (end, &btinsn);
784 }
785
786 if (errcode == -pte_eos)
787 break;
788
789 /* If the gap is at the very beginning, we ignore it - we will have
790 less trace, but we won't have any holes in the trace. */
791 if (begin == NULL)
792 continue;
793
794 pt_insn_get_offset (decoder, &offset);
795
796 warning (_("Failed to decode Intel(R) Processor Trace near trace "
797 "offset 0x%" PRIx64 " near recorded PC 0x%" PRIx64 ": %s."),
798 offset, insn.ip, pt_errstr (pt_errcode (errcode)));
799
800 /* Indicate the gap in the trace. */
801 *pend = end = ftrace_new_gap (end, errcode);
802 *ngaps += 1;
803 }
804
805 if (nerrors > 0)
806 warning (_("The recorded execution trace may have gaps."));
807}
808
809/* A callback function to allow the trace decoder to read the inferior's
810 memory. */
811
812static int
813btrace_pt_readmem_callback (gdb_byte *buffer, size_t size,
814 const struct pt_asid *asid, CORE_ADDR pc,
815 void *context)
816{
817 int errcode;
818
819 TRY
820 {
821 errcode = target_read_code (pc, buffer, size);
822 if (errcode != 0)
823 return -pte_nomap;
824 }
825 CATCH (error, RETURN_MASK_ERROR)
826 {
827 return -pte_nomap;
828 }
829 END_CATCH
830
831 return size;
832}
833
834/* Translate the vendor from one enum to another. */
835
836static enum pt_cpu_vendor
837pt_translate_cpu_vendor (enum btrace_cpu_vendor vendor)
838{
839 switch (vendor)
840 {
841 default:
842 return pcv_unknown;
843
844 case CV_INTEL:
845 return pcv_intel;
846 }
847}
848
849/* Finalize the function branch trace after decode. */
850
851static void btrace_finalize_ftrace_pt (struct pt_insn_decoder *decoder,
852 struct thread_info *tp, int level)
853{
854 pt_insn_free_decoder (decoder);
855
856 /* LEVEL is the minimal function level of all btrace function segments.
857 Define the global level offset to -LEVEL so all function levels are
858 normalized to start at zero. */
859 tp->btrace.level = -level;
860
861 /* Add a single last instruction entry for the current PC.
862 This allows us to compute the backtrace at the current PC using both
863 standard unwind and btrace unwind.
864 This extra entry is ignored by all record commands. */
865 btrace_add_pc (tp);
866}
867
868/* Compute the function branch trace from Intel(R) Processor Trace. */
869
870static void
871btrace_compute_ftrace_pt (struct thread_info *tp,
872 const struct btrace_data_pt *btrace)
873{
874 struct btrace_thread_info *btinfo;
875 struct pt_insn_decoder *decoder;
876 struct pt_config config;
877 int level, errcode;
878
879 if (btrace->size == 0)
880 return;
881
882 btinfo = &tp->btrace;
883 level = btinfo->begin != NULL ? -btinfo->level : INT_MAX;
884
885 pt_config_init(&config);
886 config.begin = btrace->data;
887 config.end = btrace->data + btrace->size;
888
889 config.cpu.vendor = pt_translate_cpu_vendor (btrace->config.cpu.vendor);
890 config.cpu.family = btrace->config.cpu.family;
891 config.cpu.model = btrace->config.cpu.model;
892 config.cpu.stepping = btrace->config.cpu.stepping;
893
894 errcode = pt_cpu_errata (&config.errata, &config.cpu);
895 if (errcode < 0)
896 error (_("Failed to configure the Intel(R) Processor Trace decoder: %s."),
897 pt_errstr (pt_errcode (errcode)));
898
899 decoder = pt_insn_alloc_decoder (&config);
900 if (decoder == NULL)
901 error (_("Failed to allocate the Intel(R) Processor Trace decoder."));
902
903 TRY
904 {
905 struct pt_image *image;
906
907 image = pt_insn_get_image(decoder);
908 if (image == NULL)
909 error (_("Failed to configure the Intel(R) Processor Trace decoder."));
910
911 errcode = pt_image_set_callback(image, btrace_pt_readmem_callback, NULL);
912 if (errcode < 0)
913 error (_("Failed to configure the Intel(R) Processor Trace decoder: "
914 "%s."), pt_errstr (pt_errcode (errcode)));
915
916 ftrace_add_pt (decoder, &btinfo->begin, &btinfo->end, &level,
917 &btinfo->ngaps);
918 }
919 CATCH (error, RETURN_MASK_ALL)
920 {
921 /* Indicate a gap in the trace if we quit trace processing. */
922 if (error.reason == RETURN_QUIT && btinfo->end != NULL)
923 {
924 btinfo->end = ftrace_new_gap (btinfo->end, BDE_PT_USER_QUIT);
925 btinfo->ngaps++;
926 }
927
928 btrace_finalize_ftrace_pt (decoder, tp, level);
929
930 throw_exception (error);
931 }
932 END_CATCH
933
934 btrace_finalize_ftrace_pt (decoder, tp, level);
935}
936
937#else /* defined (HAVE_LIBIPT) */
938
/* Stub used when GDB is built without libipt support; decoding Intel(R)
   Processor Trace is impossible in that configuration.  */

static void
btrace_compute_ftrace_pt (struct thread_info *tp,
			  const struct btrace_data_pt *btrace)
{
  internal_error (__FILE__, __LINE__, _("Unexpected branch trace format."));
}
945
946#endif /* defined (HAVE_LIBIPT) */
947
734b0e4b
MM
948/* Compute the function branch trace from a block branch trace BTRACE for
949 a thread given by BTINFO. */
950
951static void
76235df1 952btrace_compute_ftrace (struct thread_info *tp, struct btrace_data *btrace)
734b0e4b
MM
953{
954 DEBUG ("compute ftrace");
955
956 switch (btrace->format)
957 {
958 case BTRACE_FORMAT_NONE:
959 return;
960
961 case BTRACE_FORMAT_BTS:
76235df1 962 btrace_compute_ftrace_bts (tp, &btrace->variant.bts);
734b0e4b 963 return;
b20a6524
MM
964
965 case BTRACE_FORMAT_PT:
966 btrace_compute_ftrace_pt (tp, &btrace->variant.pt);
967 return;
734b0e4b
MM
968 }
969
970 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
971}
972
6e07b1d2
MM
973/* Add an entry for the current PC. */
974
975static void
976btrace_add_pc (struct thread_info *tp)
977{
734b0e4b 978 struct btrace_data btrace;
6e07b1d2
MM
979 struct btrace_block *block;
980 struct regcache *regcache;
981 struct cleanup *cleanup;
982 CORE_ADDR pc;
983
984 regcache = get_thread_regcache (tp->ptid);
985 pc = regcache_read_pc (regcache);
986
734b0e4b
MM
987 btrace_data_init (&btrace);
988 btrace.format = BTRACE_FORMAT_BTS;
989 btrace.variant.bts.blocks = NULL;
6e07b1d2 990
734b0e4b
MM
991 cleanup = make_cleanup_btrace_data (&btrace);
992
993 block = VEC_safe_push (btrace_block_s, btrace.variant.bts.blocks, NULL);
6e07b1d2
MM
994 block->begin = pc;
995 block->end = pc;
996
76235df1 997 btrace_compute_ftrace (tp, &btrace);
6e07b1d2
MM
998
999 do_cleanups (cleanup);
1000}
1001
02d27625
MM
1002/* See btrace.h. */
1003
1004void
f4abbc16 1005btrace_enable (struct thread_info *tp, const struct btrace_config *conf)
02d27625
MM
1006{
1007 if (tp->btrace.target != NULL)
1008 return;
1009
f4abbc16 1010 if (!target_supports_btrace (conf->format))
02d27625
MM
1011 error (_("Target does not support branch tracing."));
1012
1013 DEBUG ("enable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1014
f4abbc16 1015 tp->btrace.target = target_enable_btrace (tp->ptid, conf);
6e07b1d2
MM
1016
1017 /* Add an entry for the current PC so we start tracing from where we
1018 enabled it. */
1019 if (tp->btrace.target != NULL)
1020 btrace_add_pc (tp);
02d27625
MM
1021}
1022
1023/* See btrace.h. */
1024
f4abbc16
MM
1025const struct btrace_config *
1026btrace_conf (const struct btrace_thread_info *btinfo)
1027{
1028 if (btinfo->target == NULL)
1029 return NULL;
1030
1031 return target_btrace_conf (btinfo->target);
1032}
1033
1034/* See btrace.h. */
1035
02d27625
MM
1036void
1037btrace_disable (struct thread_info *tp)
1038{
1039 struct btrace_thread_info *btp = &tp->btrace;
1040 int errcode = 0;
1041
1042 if (btp->target == NULL)
1043 return;
1044
1045 DEBUG ("disable thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1046
1047 target_disable_btrace (btp->target);
1048 btp->target = NULL;
1049
1050 btrace_clear (tp);
1051}
1052
1053/* See btrace.h. */
1054
1055void
1056btrace_teardown (struct thread_info *tp)
1057{
1058 struct btrace_thread_info *btp = &tp->btrace;
1059 int errcode = 0;
1060
1061 if (btp->target == NULL)
1062 return;
1063
1064 DEBUG ("teardown thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1065
1066 target_teardown_btrace (btp->target);
1067 btp->target = NULL;
1068
1069 btrace_clear (tp);
1070}
1071
734b0e4b 1072/* Stitch branch trace in BTS format. */
969c39fb
MM
1073
static int
btrace_stitch_bts (struct btrace_data_bts *btrace, struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_function *last_bfun;
  struct btrace_insn *last_insn;
  btrace_block_s *first_new_block;

  btinfo = &tp->btrace;
  last_bfun = btinfo->end;
  gdb_assert (last_bfun != NULL);
  gdb_assert (!VEC_empty (btrace_block_s, btrace->blocks));

  /* If the existing trace ends with a gap, we just glue the traces
     together.  We need to drop the last (i.e. chronologically first) block
     of the new trace, though, since we can't fill in the start address.  */
  if (VEC_empty (btrace_insn_s, last_bfun->insn))
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  /* Beware that block trace starts with the most recent block, so the
     chronologically first block in the new trace is the last block in
     the new trace's block vector.  */
  first_new_block = VEC_last (btrace_block_s, btrace->blocks);
  last_insn = VEC_last (btrace_insn_s, last_bfun->insn);

  /* If the current PC at the end of the block is the same as in our current
     trace, there are two explanations:
       1. we executed the instruction and some branch brought us back.
       2. we have not made any progress.
     In the first case, the delta trace vector should contain at least two
     entries.
     In the second case, the delta trace vector should contain exactly one
     entry for the partial block containing the current PC.  Remove it.  */
  if (first_new_block->end == last_insn->pc
      && VEC_length (btrace_block_s, btrace->blocks) == 1)
    {
      VEC_pop (btrace_block_s, btrace->blocks);
      return 0;
    }

  DEBUG ("stitching %s to %s", ftrace_print_insn_addr (last_insn),
	 core_addr_to_string_nz (first_new_block->end));

  /* Do a simple sanity check to make sure we don't accidentally end up
     with a bad block.  This should not occur in practice.  */
  if (first_new_block->end < last_insn->pc)
    {
      /* Returning -1 makes the caller fall back to a full trace read.  */
      warning (_("Error while trying to read delta trace.  Falling back to "
		 "a full read."));
      return -1;
    }

  /* We adjust the last block to start at the end of our current trace.  */
  gdb_assert (first_new_block->begin == 0);
  first_new_block->begin = last_insn->pc;

  /* We simply pop the last insn so we can insert it again as part of
     the normal branch trace computation.
     Since instruction iterators are based on indices in the instructions
     vector, we don't leave any pointers dangling.  */
  DEBUG ("pruning insn at %s for stitching",
	 ftrace_print_insn_addr (last_insn));

  VEC_pop (btrace_insn_s, last_bfun->insn);

  /* The instructions vector may become empty temporarily if this has
     been the only instruction in this function segment.
     This violates the invariant but will be remedied shortly by
     btrace_compute_ftrace when we add the new trace.  */

  /* The only case where this would hurt is if the entire trace consisted
     of just that one instruction.  If we remove it, we might turn the now
     empty btrace function segment into a gap.  But we don't want gaps at
     the beginning.  To avoid this, we remove the entire old trace.  */
  if (last_bfun == btinfo->begin && VEC_empty (btrace_insn_s, last_bfun->insn))
    btrace_clear (tp);

  return 0;
}
1156
734b0e4b
MM
/* Adjust the block trace in order to stitch old and new trace together.
   BTRACE is the new delta trace between the last and the current stop.
   TP is the traced thread.
   May modify BTRACE as well as the existing trace in TP.
   Return 0 on success, -1 otherwise. */
1162
1163static int
31fd9caa 1164btrace_stitch_trace (struct btrace_data *btrace, struct thread_info *tp)
734b0e4b
MM
1165{
1166 /* If we don't have trace, there's nothing to do. */
1167 if (btrace_data_empty (btrace))
1168 return 0;
1169
1170 switch (btrace->format)
1171 {
1172 case BTRACE_FORMAT_NONE:
1173 return 0;
1174
1175 case BTRACE_FORMAT_BTS:
31fd9caa 1176 return btrace_stitch_bts (&btrace->variant.bts, tp);
b20a6524
MM
1177
1178 case BTRACE_FORMAT_PT:
1179 /* Delta reads are not supported. */
1180 return -1;
734b0e4b
MM
1181 }
1182
1183 internal_error (__FILE__, __LINE__, _("Unkown branch trace format."));
1184}
1185
969c39fb
MM
1186/* Clear the branch trace histories in BTINFO. */
1187
1188static void
1189btrace_clear_history (struct btrace_thread_info *btinfo)
1190{
1191 xfree (btinfo->insn_history);
1192 xfree (btinfo->call_history);
1193 xfree (btinfo->replay);
1194
1195 btinfo->insn_history = NULL;
1196 btinfo->call_history = NULL;
1197 btinfo->replay = NULL;
1198}
1199
02d27625
MM
1200/* See btrace.h. */
1201
void
btrace_fetch (struct thread_info *tp)
{
  struct btrace_thread_info *btinfo;
  struct btrace_target_info *tinfo;
  struct btrace_data btrace;
  struct cleanup *cleanup;
  int errcode;

  DEBUG ("fetch thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));

  btinfo = &tp->btrace;
  tinfo = btinfo->target;
  if (tinfo == NULL)
    return;

  /* There's no way we could get new trace while replaying.
     On the other hand, delta trace would return a partial record with the
     current PC, which is the replay PC, not the last PC, as expected.  */
  if (btinfo->replay != NULL)
    return;

  /* BTRACE is freed by the cleanup unless we keep its contents below.  */
  btrace_data_init (&btrace);
  cleanup = make_cleanup_btrace_data (&btrace);

  /* Let's first try to extend the trace we already have.
     The fallback chain is: DELTA read -> NEW read -> full (ALL) read.  */
  if (btinfo->end != NULL)
    {
      errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_DELTA);
      if (errcode == 0)
	{
	  /* Success.  Let's try to stitch the traces together.  */
	  errcode = btrace_stitch_trace (&btrace, tp);
	}
      else
	{
	  /* We failed to read delta trace.  Let's try to read new trace.  */
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_NEW);

	  /* If we got any new trace, discard what we have.  */
	  if (errcode == 0 && !btrace_data_empty (&btrace))
	    btrace_clear (tp);
	}

      /* If we were not able to read the trace, we start over.  */
      if (errcode != 0)
	{
	  btrace_clear (tp);
	  errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);
	}
    }
  else
    errcode = target_read_btrace (&btrace, tinfo, BTRACE_READ_ALL);

  /* If we were not able to read the branch trace, signal an error.  */
  if (errcode != 0)
    error (_("Failed to read branch trace."));

  /* Compute the trace, provided we have any.  */
  if (!btrace_data_empty (&btrace))
    {
      btrace_clear_history (btinfo);
      btrace_compute_ftrace (tp, &btrace);
    }

  do_cleanups (cleanup);
}
1269
1270/* See btrace.h. */
1271
1272void
1273btrace_clear (struct thread_info *tp)
1274{
1275 struct btrace_thread_info *btinfo;
23a7fe75 1276 struct btrace_function *it, *trash;
02d27625
MM
1277
1278 DEBUG ("clear thread %d (%s)", tp->num, target_pid_to_str (tp->ptid));
1279
0b722aec
MM
1280 /* Make sure btrace frames that may hold a pointer into the branch
1281 trace data are destroyed. */
1282 reinit_frame_cache ();
1283
02d27625
MM
1284 btinfo = &tp->btrace;
1285
23a7fe75
MM
1286 it = btinfo->begin;
1287 while (it != NULL)
1288 {
1289 trash = it;
1290 it = it->flow.next;
02d27625 1291
23a7fe75
MM
1292 xfree (trash);
1293 }
1294
1295 btinfo->begin = NULL;
1296 btinfo->end = NULL;
31fd9caa 1297 btinfo->ngaps = 0;
23a7fe75 1298
969c39fb 1299 btrace_clear_history (btinfo);
02d27625
MM
1300}
1301
1302/* See btrace.h. */
1303
void
btrace_free_objfile (struct objfile *objfile)
{
  struct thread_info *tp;

  DEBUG ("free objfile");

  /* NOTE(review): OBJFILE itself is not inspected; the recorded trace of
     every live thread is discarded regardless of which objfile goes away.  */
  ALL_NON_EXITED_THREADS (tp)
    btrace_clear (tp);
}
c12a2917
MM
1314
1315#if defined (HAVE_LIBEXPAT)
1316
1317/* Check the btrace document version. */
1318
1319static void
1320check_xml_btrace_version (struct gdb_xml_parser *parser,
1321 const struct gdb_xml_element *element,
1322 void *user_data, VEC (gdb_xml_value_s) *attributes)
1323{
1324 const char *version = xml_find_attribute (attributes, "version")->value;
1325
1326 if (strcmp (version, "1.0") != 0)
1327 gdb_xml_error (parser, _("Unsupported btrace version: \"%s\""), version);
1328}
1329
1330/* Parse a btrace "block" xml record. */
1331
1332static void
1333parse_xml_btrace_block (struct gdb_xml_parser *parser,
1334 const struct gdb_xml_element *element,
1335 void *user_data, VEC (gdb_xml_value_s) *attributes)
1336{
734b0e4b 1337 struct btrace_data *btrace;
c12a2917
MM
1338 struct btrace_block *block;
1339 ULONGEST *begin, *end;
1340
1341 btrace = user_data;
734b0e4b
MM
1342
1343 switch (btrace->format)
1344 {
1345 case BTRACE_FORMAT_BTS:
1346 break;
1347
1348 case BTRACE_FORMAT_NONE:
1349 btrace->format = BTRACE_FORMAT_BTS;
1350 btrace->variant.bts.blocks = NULL;
1351 break;
1352
1353 default:
1354 gdb_xml_error (parser, _("Btrace format error."));
1355 }
c12a2917
MM
1356
1357 begin = xml_find_attribute (attributes, "begin")->value;
1358 end = xml_find_attribute (attributes, "end")->value;
1359
734b0e4b 1360 block = VEC_safe_push (btrace_block_s, btrace->variant.bts.blocks, NULL);
c12a2917
MM
1361 block->begin = *begin;
1362 block->end = *end;
1363}
1364
b20a6524
MM
1365/* Parse a "raw" xml record. */
1366
static void
parse_xml_raw (struct gdb_xml_parser *parser, const char *body_text,
	       gdb_byte **pdata, unsigned long *psize)
{
  struct cleanup *cleanup;
  gdb_byte *data, *bin;
  unsigned long size;
  size_t len;

  len = strlen (body_text);
  size = len / 2;

  /* Multiplying back catches both an odd LEN and truncation when
     unsigned long is narrower than size_t.  */
  if ((size_t) size * 2 != len)
    gdb_xml_error (parser, _("Bad raw data size."));

  bin = data = xmalloc (size);
  cleanup = make_cleanup (xfree, data);

  /* We use hex encoding - see common/rsp-low.h.  */
  while (len > 0)
    {
      char hi, lo;

      hi = *body_text++;
      lo = *body_text++;

      /* A NUL inside the body would mean LEN lied about the length.  */
      if (hi == 0 || lo == 0)
	gdb_xml_error (parser, _("Bad hex encoding."));

      *bin++ = fromhex (hi) * 16 + fromhex (lo);
      len -= 2;
    }

  /* Ownership of DATA transfers to the caller via *PDATA.  */
  discard_cleanups (cleanup);

  *pdata = data;
  *psize = size;
}
1405
1406/* Parse a btrace pt-config "cpu" xml record. */
1407
1408static void
1409parse_xml_btrace_pt_config_cpu (struct gdb_xml_parser *parser,
1410 const struct gdb_xml_element *element,
1411 void *user_data,
1412 VEC (gdb_xml_value_s) *attributes)
1413{
1414 struct btrace_data *btrace;
1415 const char *vendor;
1416 ULONGEST *family, *model, *stepping;
1417
1418 vendor = xml_find_attribute (attributes, "vendor")->value;
1419 family = xml_find_attribute (attributes, "family")->value;
1420 model = xml_find_attribute (attributes, "model")->value;
1421 stepping = xml_find_attribute (attributes, "stepping")->value;
1422
1423 btrace = user_data;
1424
1425 if (strcmp (vendor, "GenuineIntel") == 0)
1426 btrace->variant.pt.config.cpu.vendor = CV_INTEL;
1427
1428 btrace->variant.pt.config.cpu.family = *family;
1429 btrace->variant.pt.config.cpu.model = *model;
1430 btrace->variant.pt.config.cpu.stepping = *stepping;
1431}
1432
1433/* Parse a btrace pt "raw" xml record. */
1434
1435static void
1436parse_xml_btrace_pt_raw (struct gdb_xml_parser *parser,
1437 const struct gdb_xml_element *element,
1438 void *user_data, const char *body_text)
1439{
1440 struct btrace_data *btrace;
1441
1442 btrace = user_data;
1443 parse_xml_raw (parser, body_text, &btrace->variant.pt.data,
1444 &btrace->variant.pt.size);
1445}
1446
1447/* Parse a btrace "pt" xml record. */
1448
1449static void
1450parse_xml_btrace_pt (struct gdb_xml_parser *parser,
1451 const struct gdb_xml_element *element,
1452 void *user_data, VEC (gdb_xml_value_s) *attributes)
1453{
1454 struct btrace_data *btrace;
1455
1456 btrace = user_data;
1457 btrace->format = BTRACE_FORMAT_PT;
1458 btrace->variant.pt.config.cpu.vendor = CV_UNKNOWN;
1459 btrace->variant.pt.data = NULL;
1460 btrace->variant.pt.size = 0;
1461}
1462
c12a2917
MM
/* Attributes of a btrace <block> element; both addresses are required.  */

static const struct gdb_xml_attribute block_attributes[] = {
  { "begin", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "end", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a pt-config <cpu> element.  */

static const struct gdb_xml_attribute btrace_pt_config_cpu_attributes[] = {
  { "vendor", GDB_XML_AF_NONE, NULL, NULL },
  { "family", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "model", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { "stepping", GDB_XML_AF_NONE, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of a <pt-config> element.  */

static const struct gdb_xml_element btrace_pt_config_children[] = {
  { "cpu", btrace_pt_config_cpu_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_pt_config_cpu, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Children of a <pt> element; the raw payload is parsed from body text.  */

static const struct gdb_xml_element btrace_pt_children[] = {
  { "pt-config", NULL, btrace_pt_config_children, GDB_XML_EF_OPTIONAL, NULL,
    NULL },
  { "raw", NULL, NULL, GDB_XML_EF_OPTIONAL, NULL, parse_xml_btrace_pt_raw },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level <btrace> element.  */

static const struct gdb_xml_attribute btrace_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of the top-level <btrace> element: BTS blocks or a PT record.  */

static const struct gdb_xml_element btrace_children[] = {
  { "block", block_attributes, NULL,
    GDB_XML_EF_REPEATABLE | GDB_XML_EF_OPTIONAL, parse_xml_btrace_block, NULL },
  { "pt", NULL, btrace_pt_children, GDB_XML_EF_OPTIONAL, parse_xml_btrace_pt,
    NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* The btrace document; the version attribute is checked on entry.  */

static const struct gdb_xml_element btrace_elements[] = {
  { "btrace", btrace_attributes, btrace_children, GDB_XML_EF_NONE,
    check_xml_btrace_version, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1508
1509#endif /* defined (HAVE_LIBEXPAT) */
1510
1511/* See btrace.h. */
1512
734b0e4b
MM
void
parse_xml_btrace (struct btrace_data *btrace, const char *buffer)
{
  struct cleanup *cleanup;
  int errcode;

#if defined (HAVE_LIBEXPAT)

  btrace->format = BTRACE_FORMAT_NONE;

  /* The cleanup frees partially-parsed data if the parse errors out.  */
  cleanup = make_cleanup_btrace_data (btrace);
  errcode = gdb_xml_parse_quick (_("btrace"), "btrace.dtd", btrace_elements,
				 buffer, btrace);
  if (errcode != 0)
    error (_("Error parsing branch trace."));

  /* Keep parse results.  */
  discard_cleanups (cleanup);

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("Cannot process branch trace.  XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
23a7fe75 1538
f4abbc16
MM
1539#if defined (HAVE_LIBEXPAT)
1540
1541/* Parse a btrace-conf "bts" xml record. */
1542
1543static void
1544parse_xml_btrace_conf_bts (struct gdb_xml_parser *parser,
1545 const struct gdb_xml_element *element,
1546 void *user_data, VEC (gdb_xml_value_s) *attributes)
1547{
1548 struct btrace_config *conf;
d33501a5 1549 struct gdb_xml_value *size;
f4abbc16
MM
1550
1551 conf = user_data;
1552 conf->format = BTRACE_FORMAT_BTS;
d33501a5
MM
1553 conf->bts.size = 0;
1554
1555 size = xml_find_attribute (attributes, "size");
1556 if (size != NULL)
b20a6524 1557 conf->bts.size = (unsigned int) *(ULONGEST *) size->value;
f4abbc16
MM
1558}
1559
b20a6524
MM
1560/* Parse a btrace-conf "pt" xml record. */
1561
1562static void
1563parse_xml_btrace_conf_pt (struct gdb_xml_parser *parser,
1564 const struct gdb_xml_element *element,
1565 void *user_data, VEC (gdb_xml_value_s) *attributes)
1566{
1567 struct btrace_config *conf;
1568 struct gdb_xml_value *size;
1569
1570 conf = user_data;
1571 conf->format = BTRACE_FORMAT_PT;
1572 conf->pt.size = 0;
1573
1574 size = xml_find_attribute (attributes, "size");
1575 if (size != NULL)
1576 conf->pt.size = (unsigned int) *(ULONGEST *) size->value;
1577}
1578
/* Attributes of a btrace-conf <pt> element; "size" is optional.  */

static const struct gdb_xml_attribute btrace_conf_pt_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Attributes of a btrace-conf <bts> element; "size" is optional.  */

static const struct gdb_xml_attribute btrace_conf_bts_attributes[] = {
  { "size", GDB_XML_AF_OPTIONAL, gdb_xml_parse_attr_ulongest, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* Children of a <btrace-conf> element: one optional record per format.  */

static const struct gdb_xml_element btrace_conf_children[] = {
  { "bts", btrace_conf_bts_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_bts, NULL },
  { "pt", btrace_conf_pt_attributes, NULL, GDB_XML_EF_OPTIONAL,
    parse_xml_btrace_conf_pt, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};

/* Attributes of the top-level <btrace-conf> element.  */

static const struct gdb_xml_attribute btrace_conf_attributes[] = {
  { "version", GDB_XML_AF_NONE, NULL, NULL },
  { NULL, GDB_XML_AF_NONE, NULL, NULL }
};

/* The btrace-conf document.  */

static const struct gdb_xml_element btrace_conf_elements[] = {
  { "btrace-conf", btrace_conf_attributes, btrace_conf_children,
    GDB_XML_EF_NONE, NULL, NULL },
  { NULL, NULL, NULL, GDB_XML_EF_NONE, NULL, NULL }
};
1607
1608#endif /* defined (HAVE_LIBEXPAT) */
1609
1610/* See btrace.h. */
1611
void
parse_xml_btrace_conf (struct btrace_config *conf, const char *xml)
{
  int errcode;

#if defined (HAVE_LIBEXPAT)

  /* The parse handlers fill in CONF directly via the user_data pointer.  */
  errcode = gdb_xml_parse_quick (_("btrace-conf"), "btrace-conf.dtd",
				 btrace_conf_elements, xml, conf);
  if (errcode != 0)
    error (_("Error parsing branch trace configuration."));

#else  /* !defined (HAVE_LIBEXPAT) */

  error (_("XML parsing is not supported."));

#endif  /* !defined (HAVE_LIBEXPAT) */
}
1630
23a7fe75
MM
1631/* See btrace.h. */
1632
1633const struct btrace_insn *
1634btrace_insn_get (const struct btrace_insn_iterator *it)
1635{
1636 const struct btrace_function *bfun;
1637 unsigned int index, end;
1638
1639 index = it->index;
1640 bfun = it->function;
1641
31fd9caa
MM
1642 /* Check if the iterator points to a gap in the trace. */
1643 if (bfun->errcode != 0)
1644 return NULL;
1645
23a7fe75
MM
1646 /* The index is within the bounds of this function's instruction vector. */
1647 end = VEC_length (btrace_insn_s, bfun->insn);
1648 gdb_assert (0 < end);
1649 gdb_assert (index < end);
1650
1651 return VEC_index (btrace_insn_s, bfun->insn, index);
1652}
1653
1654/* See btrace.h. */
1655
1656unsigned int
1657btrace_insn_number (const struct btrace_insn_iterator *it)
1658{
1659 const struct btrace_function *bfun;
1660
1661 bfun = it->function;
31fd9caa
MM
1662
1663 /* Return zero if the iterator points to a gap in the trace. */
1664 if (bfun->errcode != 0)
1665 return 0;
1666
23a7fe75
MM
1667 return bfun->insn_offset + it->index;
1668}
1669
1670/* See btrace.h. */
1671
1672void
1673btrace_insn_begin (struct btrace_insn_iterator *it,
1674 const struct btrace_thread_info *btinfo)
1675{
1676 const struct btrace_function *bfun;
1677
1678 bfun = btinfo->begin;
1679 if (bfun == NULL)
1680 error (_("No trace."));
1681
1682 it->function = bfun;
1683 it->index = 0;
1684}
1685
1686/* See btrace.h. */
1687
1688void
1689btrace_insn_end (struct btrace_insn_iterator *it,
1690 const struct btrace_thread_info *btinfo)
1691{
1692 const struct btrace_function *bfun;
1693 unsigned int length;
1694
1695 bfun = btinfo->end;
1696 if (bfun == NULL)
1697 error (_("No trace."));
1698
23a7fe75
MM
1699 length = VEC_length (btrace_insn_s, bfun->insn);
1700
31fd9caa
MM
1701 /* The last function may either be a gap or it contains the current
1702 instruction, which is one past the end of the execution trace; ignore
1703 it. */
1704 if (length > 0)
1705 length -= 1;
1706
23a7fe75 1707 it->function = bfun;
31fd9caa 1708 it->index = length;
23a7fe75
MM
1709}
1710
1711/* See btrace.h. */
1712
unsigned int
btrace_insn_next (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int end, space, adv;

      end = VEC_length (btrace_insn_s, bfun->insn);

      /* An empty function segment represents a gap in the trace.  We count
	 it as one instruction.  */
      if (end == 0)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    break;

	  stride -= 1;
	  steps += 1;

	  bfun = next;
	  index = 0;

	  continue;
	}

      gdb_assert (0 < end);
      gdb_assert (index < end);

      /* Compute the number of instructions remaining in this segment.  */
      space = end - index;

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (space, stride);
      stride -= adv;
      index += adv;
      steps += adv;

      /* Move to the next function if we're at the end of this one.  */
      if (index == end)
	{
	  const struct btrace_function *next;

	  next = bfun->flow.next;
	  if (next == NULL)
	    {
	      /* We stepped past the last function.

		 Let's adjust the index to point to the last instruction in
		 the previous function.  */
	      index -= 1;
	      steps -= 1;
	      break;
	    }

	  /* We now point to the first instruction in the new function.  */
	  bfun = next;
	  index = 0;
	}

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  Returns the number of instructions actually
     stepped, which may be less than STRIDE at the end of the trace.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1792
1793/* See btrace.h. */
1794
unsigned int
btrace_insn_prev (struct btrace_insn_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int index, steps;

  bfun = it->function;
  steps = 0;
  index = it->index;

  while (stride != 0)
    {
      unsigned int adv;

      /* Move to the previous function if we're at the start of this one.  */
      if (index == 0)
	{
	  const struct btrace_function *prev;

	  prev = bfun->flow.prev;
	  if (prev == NULL)
	    break;

	  /* We point to one after the last instruction in the new function.  */
	  bfun = prev;
	  index = VEC_length (btrace_insn_s, bfun->insn);

	  /* An empty function segment represents a gap in the trace.  We count
	     it as one instruction.  */
	  if (index == 0)
	    {
	      stride -= 1;
	      steps += 1;

	      continue;
	    }
	}

      /* Advance the iterator as far as possible within this segment.  */
      adv = min (index, stride);

      stride -= adv;
      index -= adv;
      steps += adv;

      /* We did make progress.  */
      gdb_assert (adv > 0);
    }

  /* Update the iterator.  Returns the number of instructions actually
     stepped, which may be less than STRIDE at the start of the trace.  */
  it->function = bfun;
  it->index = index;

  return steps;
}
1850
1851/* See btrace.h. */
1852
int
btrace_insn_cmp (const struct btrace_insn_iterator *lhs,
		 const struct btrace_insn_iterator *rhs)
{
  unsigned int lnum, rnum;

  lnum = btrace_insn_number (lhs);
  rnum = btrace_insn_number (rhs);

  /* A gap has an instruction number of zero.  Things are getting more
     complicated if gaps are involved.

     We take the instruction number offset from the iterator's function.
     This is the number of the first instruction after the gap.

     This is OK as long as both lhs and rhs point to gaps.  If only one of
     them does, we need to adjust the number based on the other's regular
     instruction number.  Otherwise, a gap might compare equal to an
     instruction.  */

  if (lnum == 0 && rnum == 0)
    {
      lnum = lhs->function->insn_offset;
      rnum = rhs->function->insn_offset;
    }
  else if (lnum == 0)
    {
      lnum = lhs->function->insn_offset;

      /* Nudge the gap below the real instruction so they never compare
	 equal.  */
      if (lnum == rnum)
	lnum -= 1;
    }
  else if (rnum == 0)
    {
      rnum = rhs->function->insn_offset;

      if (rnum == lnum)
	rnum -= 1;
    }

  /* Negative, zero, or positive - like strcmp.  */
  return (int) (lnum - rnum);
}
1895
1896/* See btrace.h. */
1897
int
btrace_find_insn_by_number (struct btrace_insn_iterator *it,
			    const struct btrace_thread_info *btinfo,
			    unsigned int number)
{
  const struct btrace_function *bfun;
  unsigned int end, length;

  /* Search backwards for the segment whose number range covers NUMBER.  */
  for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
    {
      /* Skip gaps. */
      if (bfun->errcode != 0)
	continue;

      if (bfun->insn_offset <= number)
	break;
    }

  /* NUMBER lies before the first numbered instruction.  */
  if (bfun == NULL)
    return 0;

  length = VEC_length (btrace_insn_s, bfun->insn);
  gdb_assert (length > 0);

  /* NUMBER lies beyond the found segment's range.  */
  end = bfun->insn_offset + length;
  if (end <= number)
    return 0;

  /* Found: position IT at NUMBER and report success.  */
  it->function = bfun;
  it->index = number - bfun->insn_offset;

  return 1;
}
1931
1932/* See btrace.h. */
1933
const struct btrace_function *
btrace_call_get (const struct btrace_call_iterator *it)
{
  /* NULL for the end iterator; see btrace_call_end.  */
  return it->function;
}
1939
1940/* See btrace.h. */
1941
unsigned int
btrace_call_number (const struct btrace_call_iterator *it)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int insns;

  btinfo = it->btinfo;
  bfun = it->function;
  if (bfun != NULL)
    return bfun->number;

  /* For the end iterator, i.e. bfun == NULL, we return one more than the
     number of the last function.  */
  bfun = btinfo->end;
  insns = VEC_length (btrace_insn_s, bfun->insn);

  /* If the function contains only a single instruction (i.e. the current
     instruction), it will be skipped and its number is already the number
     we seek.  */
  if (insns == 1)
    return bfun->number;

  /* Otherwise, return one more than the number of the last function.  */
  return bfun->number + 1;
}
1968
1969/* See btrace.h. */
1970
1971void
1972btrace_call_begin (struct btrace_call_iterator *it,
1973 const struct btrace_thread_info *btinfo)
1974{
1975 const struct btrace_function *bfun;
1976
1977 bfun = btinfo->begin;
1978 if (bfun == NULL)
1979 error (_("No trace."));
1980
1981 it->btinfo = btinfo;
1982 it->function = bfun;
1983}
1984
1985/* See btrace.h. */
1986
1987void
1988btrace_call_end (struct btrace_call_iterator *it,
1989 const struct btrace_thread_info *btinfo)
1990{
1991 const struct btrace_function *bfun;
1992
1993 bfun = btinfo->end;
1994 if (bfun == NULL)
1995 error (_("No trace."));
1996
1997 it->btinfo = btinfo;
1998 it->function = NULL;
1999}
2000
2001/* See btrace.h. */
2002
unsigned int
btrace_call_next (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;
  while (bfun != NULL)
    {
      const struct btrace_function *next;
      unsigned int insns;

      next = bfun->flow.next;
      if (next == NULL)
	{
	  /* Ignore the last function if it only contains a single
	     (i.e. the current) instruction.
	     NOTE(review): when STEPS is still zero this relies on unsigned
	     wrap-around (STEPS becomes UINT_MAX and is re-incremented below)
	     - confirm intentional.  */
	  insns = VEC_length (btrace_insn_s, bfun->insn);
	  if (insns == 1)
	    steps -= 1;
	}

      if (stride == steps)
	break;

      bfun = next;
      steps += 1;
    }

  /* IT->function becomes NULL when stepping past the last function,
     matching the end iterator representation.  */
  it->function = bfun;
  return steps;
}
2036
2037/* See btrace.h. */
2038
unsigned int
btrace_call_prev (struct btrace_call_iterator *it, unsigned int stride)
{
  const struct btrace_thread_info *btinfo;
  const struct btrace_function *bfun;
  unsigned int steps;

  bfun = it->function;
  steps = 0;

  /* Stepping back from the end iterator first needs to materialize the
     last real function.  */
  if (bfun == NULL)
    {
      unsigned int insns;

      btinfo = it->btinfo;
      bfun = btinfo->end;
      if (bfun == NULL)
	return 0;

      /* Ignore the last function if it only contains a single
	 (i.e. the current) instruction.  */
      insns = VEC_length (btrace_insn_s, bfun->insn);
      if (insns == 1)
	bfun = bfun->flow.prev;

      if (bfun == NULL)
	return 0;

      steps += 1;
    }

  while (steps < stride)
    {
      const struct btrace_function *prev;

      prev = bfun->flow.prev;
      if (prev == NULL)
	break;

      bfun = prev;
      steps += 1;
    }

  /* Returns the number of function segments actually stepped, which may
     be less than STRIDE at the start of the trace.  */
  it->function = bfun;
  return steps;
}
2085
2086/* See btrace.h. */
2087
int
btrace_call_cmp (const struct btrace_call_iterator *lhs,
		 const struct btrace_call_iterator *rhs)
{
  unsigned int lnum = btrace_call_number (lhs);
  unsigned int rnum = btrace_call_number (rhs);

  /* Negative, zero, or positive - like strcmp.  */
  return (int) (lnum - rnum);
}
2099
2100/* See btrace.h. */
2101
2102int
2103btrace_find_call_by_number (struct btrace_call_iterator *it,
2104 const struct btrace_thread_info *btinfo,
2105 unsigned int number)
2106{
2107 const struct btrace_function *bfun;
2108
2109 for (bfun = btinfo->end; bfun != NULL; bfun = bfun->flow.prev)
2110 {
2111 unsigned int bnum;
2112
2113 bnum = bfun->number;
2114 if (number == bnum)
2115 {
2116 it->btinfo = btinfo;
2117 it->function = bfun;
2118 return 1;
2119 }
2120
2121 /* Functions are ordered and numbered consecutively. We could bail out
2122 earlier. On the other hand, it is very unlikely that we search for
2123 a nonexistent function. */
2124 }
2125
2126 return 0;
2127}
2128
2129/* See btrace.h. */
2130
void
btrace_set_insn_history (struct btrace_thread_info *btinfo,
			 const struct btrace_insn_iterator *begin,
			 const struct btrace_insn_iterator *end)
{
  /* Allocate the history record lazily on first use; it is freed by
     btrace_clear_history.  */
  if (btinfo->insn_history == NULL)
    btinfo->insn_history = xzalloc (sizeof (*btinfo->insn_history));

  btinfo->insn_history->begin = *begin;
  btinfo->insn_history->end = *end;
}
2142
2143/* See btrace.h. */
2144
void
btrace_set_call_history (struct btrace_thread_info *btinfo,
			 const struct btrace_call_iterator *begin,
			 const struct btrace_call_iterator *end)
{
  /* Both iterators must refer to the same thread's trace.  */
  gdb_assert (begin->btinfo == end->btinfo);

  /* Allocate the history record lazily on first use; it is freed by
     btrace_clear_history.  */
  if (btinfo->call_history == NULL)
    btinfo->call_history = xzalloc (sizeof (*btinfo->call_history));

  btinfo->call_history->begin = *begin;
  btinfo->call_history->end = *end;
}
07bbe694
MM
2158
2159/* See btrace.h. */
2160
int
btrace_is_replaying (struct thread_info *tp)
{
  /* A non-NULL replay iterator means TP is replaying its recorded trace.  */
  return tp->btrace.replay != NULL;
}
6e07b1d2
MM
2166
2167/* See btrace.h. */
2168
2169int
2170btrace_is_empty (struct thread_info *tp)
2171{
2172 struct btrace_insn_iterator begin, end;
2173 struct btrace_thread_info *btinfo;
2174
2175 btinfo = &tp->btrace;
2176
2177 if (btinfo->begin == NULL)
2178 return 1;
2179
2180 btrace_insn_begin (&begin, btinfo);
2181 btrace_insn_end (&end, btinfo);
2182
2183 return btrace_insn_cmp (&begin, &end) == 0;
2184}
734b0e4b
MM
2185
2186/* Forward the cleanup request. */
2187
static void
do_btrace_data_cleanup (void *arg)
{
  /* ARG is a struct btrace_data *, registered by make_cleanup_btrace_data.  */
  btrace_data_fini (arg);
}
2193
2194/* See btrace.h. */
2195
struct cleanup *
make_cleanup_btrace_data (struct btrace_data *data)
{
  /* The returned cleanup finalizes DATA; discard it (discard_cleanups)
     to keep the data instead.  */
  return make_cleanup (do_btrace_data_cleanup, data);
}